From 0840e3afb546c83ed03df1273c93c7eb644ab813 Mon Sep 17 00:00:00 2001 From: feiranwang Date: Wed, 8 Apr 2015 22:13:05 -0700 Subject: [PATCH 001/347] first commit --- .gitignore | 1 + ConjunctiveQueryParser.scala | 342 +++++++++++++++++++++++++++++++++++ test.scala | 55 ++++++ 3 files changed, 398 insertions(+) create mode 100644 .gitignore create mode 100644 ConjunctiveQueryParser.scala create mode 100644 test.scala diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..6b468b62a --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +*.class diff --git a/ConjunctiveQueryParser.scala b/ConjunctiveQueryParser.scala new file mode 100644 index 000000000..ea6f5bcfe --- /dev/null +++ b/ConjunctiveQueryParser.scala @@ -0,0 +1,342 @@ +import scala.util.parsing.combinator._ +import scala.collection.immutable.HashMap +/* + This file parses an extended form of datalog like sugar. + + It allows schema declarations + + SomeOther(realname, otherattribute) + + And queries + + Q(x,y) :- R(x,y), SomeOther(y, z) + + Using the schema can SQLized as + + SELECT R1.x,R2.y + FROM R as R1,SomeOther as R2 + WHERE R1.y = R2.realname + + We translate by introducing aliases R1, R2 , etc. to deal with + repeated symbols. + + TODO: + ================= + + Our schema needs to know whether a symbol is this a query table (and + so should contain an _id) field or is a regular table from the + user. + + If a head term is not mentioned in the schema, its assumed it is a + query table that this code must create. + + If one wants to explicilty mention a query table in the schema, they + do so with a trailing exclamation point as follows + + Q(x,y)!; + +Consider + + Q(x) :- R(x,f) weight=f + + ... R is likely *not* a variable table ... we record its translation below. + + In contrast, Q(x) :- R(x),S(x) ... coule be treated as variable tables. Hence, the schema has: + + R(x,f) // regular table + R(x,f)! // variable table. + + */ + +/* TODOs: + + Refactor schema object and introduce error checking (unsafe queries, + unordered attributes, etc.). +*/ + +// *************************************** +// * The union types for for the parser. * +// *************************************** +trait Statement +case class Variable(varName : String, relName: String, index : Int ) +case class Atom(name : String, terms : List[Variable]) +case class ConjunctiveQuery(head: Atom, body: List[Atom]) + +case class WeightedRule(q : ConjunctiveQuery, weights : Option[List[String]]) extends Statement // Weighted rule +case class SchemaElement( a : Atom , query : Boolean ) extends Statement // atom and whether this is a query relation. + + +// Parser +class ConjunctiveQueryParser extends JavaTokenParsers { + // Odd definitions, but we'll keep them. + def stringliteral1: Parser[String] = ("'"+"""([^'\p{Cntrl}\\]|\\[\\"'bfnrt]|\\u[a-fA-F0-9]{4})*"""+"'").r ^^ {case (x) => x} + def stringliteral2: Parser[String] = """[a-zA-Z_0-9\.]*""".r ^^ {case (x) => x} + def stringliteral: Parser[String] = (stringliteral1 | stringliteral2) ^^ {case (x) => x} + + // relation names and columns are just strings. 
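+  // A sketch of what these accept (illustrative, assuming the definitions
+  // just below): parseAll(relation_name, "SomeOther") yields "SomeOther",
+  // and parseAll(col, "p1.text") succeeds because stringliteral2 admits dots.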
+ def relation_name: Parser[String] = stringliteral ^^ {case (x) => x} + def col : Parser[String] = stringliteral ^^ { case(x) => x } + + def atom: Parser[Atom] = relation_name ~ "(" ~ rep1sep(col, ",") ~ ")" ^^ { + case (r ~ "(" ~ cols ~ ")") => { + val vars = cols.zipWithIndex map { case(name,i) => Variable(name, r, i) } + Atom(r,vars) + } + } + + def query : Parser[ConjunctiveQuery] = atom ~ ":-" ~ rep1sep(atom, ",") ^^ { + case (headatom ~ ":-" ~ bodyatoms) => ConjunctiveQuery(headatom, bodyatoms.toList) + } + + def schema_element : Parser[SchemaElement] = atom ~ opt("!") ^^ { + case (a ~ None) => SchemaElement(a,true) + case (a ~ Some(_)) => SchemaElement(a,false) + } + + + def rule : Parser[WeightedRule] = query ~ opt( "weight=" ~ rep1sep(col, ",")) ^^ { + case (q ~ Some("weight=" ~ weights)) => WeightedRule(q,Some(weights)) + case (q ~ None) => WeightedRule(q,None) + } + + // rules or schema elements in aribitrary order + def statement : Parser[Statement] = (rule | schema_element) ^^ {case(x) => x} + + def statements : Parser[List[Statement]] = rep1sep(statement, ";") ^^ { case(x) => x } +} + + +// This handles the schema statements. +// It can tell you if a predicate is a "query" predicate or a "ground prediate" +// and it resolves Variables their correct and true name in the schema, i.e. R(x,y) then x could be Attribute1 declared. +class StatementSchema( statements : List[Statement] ) { + // TODO: refactor the schema into a class that constructs and + // manages these maps. Also it should have appropriate + // abstractions and error handling for missing values. + // ** Start refactor. + var schema : Map[ Tuple2[String,Int], String ] = new HashMap[ Tuple2[String,Int], String ]() + + var ground_relations : Map[ String, Boolean ] = new HashMap[ String, Boolean ]() + + def init() = { + // generate the statements. + statements.foreach { + case SchemaElement(Atom(r, terms),query) => + terms.foreach { + case Variable(n,r,i) => + schema += { (r,i) -> n } + ground_relations += { r -> query } // record whether a query or a ground term. + } + case WeightedRule(_,_) => () + } + println(schema) + println(ground_relations) + } + + init() + + // Given a variable, resolve it. TODO: This should give a warning, + // if we encouter a variable that is not in this map, then something + // odd has happened. + def resolveName( v : Variable ) : String = { + v match { case Variable(v,relName,i) => + if(schema contains (relName,i)) { + schema(relName,i) + } else { + return v // I do not like this default, as it allows some errors. TOOD: MAKE MORE PRECISE! + } + } + } + + // The default is query term. + def isQueryTerm( relName : String ): Boolean = { + if( ground_relations contains relName ) !ground_relations(relName) else true + } +} + +// This is responsible for schema elements within a given query, e.g., +// what is the canonical version of x? (i.e., the first time it is +// mentioned in the body. This is useful to translate to SQL (join +// conditions, select, etc.) +class QuerySchema(q : ConjunctiveQuery) { + var query_schema = new HashMap[ String, Tuple2[Int,Variable] ]() + + // maps each variable name to a canonical version of itself (first occurence in body in left-to-right order) + // index is the index of the subgoal/atom this variable is found in the body. + // variable is the complete Variable type for the found variable. + def generateCanonicalVar() = { + q.body.zipWithIndex.foreach { + case (Atom(relName,terms),index) => { + terms.foreach { + case Variable(v, r, i) => + if( ! 
(query_schema contains v) ) + query_schema += { v -> (index, Variable(v,r,i) ) } + } + } + } + } + generateCanonicalVar() // initialize + + // accessors + def getBodyIndex( varName : String ) : Int = { query_schema(varName)._1 } + def getVar(varName : String ) : Variable = { query_schema(varName)._2 } + +} +object ConjunctiveQueryParser extends ConjunctiveQueryParser { + + // This is generic code that generates the FROM with positional aliasing R0, R1, etc. + // and the corresponding WHERE clause (equating all variables) + def generateSQLBody(ss : StatementSchema, z : ConjunctiveQuery) : String = { + val bodyNames = ( z.body.zipWithIndex map { case(x,i) => s"${x.name} as R${i}"}).mkString(",") + // Simple logic for the where clause, first find every first occurence of a + // and stick it in a map. + val qs = new QuerySchema(z) + + val whereClause = z.body.zipWithIndex flatMap { + case (Atom(relName, terms),body_index) => + terms flatMap { + case Variable(varName, relName, index) => + val canonical_body_index = qs.getBodyIndex(varName) + + if (canonical_body_index != body_index) { + val real_attr_name1 = ss.resolveName( Variable(varName, relName, index) ) + val real_attr_name2 = ss.resolveName( qs.getVar(varName)) + Some(s"R${ body_index }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") + } else { None } + } + } + val whereClauseStr = whereClause match { + case Nil => "" + case _ => s"""WHERE ${whereClause.mkString(" AND ")}""" + } + + s"""FROM ${ bodyNames } ${ whereClauseStr }""" + } + // generate the node portion (V) of the factor graph + def nodeRule(ss : StatementSchema, z : ConjunctiveQuery) : String = { + val headTerms = z.head.terms map { + case Variable(v,r,i) => s"R${i}.${ss.resolveName(Variable(v,r,i)) }" + } + val headTermsStr = ( "0 as _id" :: headTerms ).mkString(",") + s"""CREATE TABLE ${ z.head.name } AS + SELECT DISTINCT ${ headTermsStr } + ${ generateSQLBody(ss,z) } + """ + } + + + // The input is a weighted rule and our goal is to generate both the + // node query and the (hyper) edge query. The node query is + // straightforward using our previous code. + + // The edge query has three parts. + + // The FROM and WHERE clause contain the same terms from node rule, with two extras. + // (1) We add the head atom into the FROM clause. + // (2) We add the join conditions to the WHERE clause between the head atom and the body. + // In the code below, we create a "fake CQ" and generate its body (ignoring the head) + + // The SELECT clause of the query is a bit interesting. + // (1) The SELECT clause contains the id of the head relation (if it is a query term) + // (2) The SELECT clause should also contain the weight attributes (resolved properly) + // (2) There should be an array_agg( tuple(id1,id2,..) ) of the all query relations in the body. + + // GROUP BY + // We should have a group that contains the head variable and the weight attributes. + def weightedRule( ss: StatementSchema, r : WeightedRule ) : Tuple2[Option[String], Option[String] ] = { + val node_query = if (ss.isQueryTerm(r.q.head.name)) Some(nodeRule(ss,r.q)) else None + val edge_query = { + // in the code below, we rely on the head being the last atom for indexing (since we index R{index}) + val fakeBody = r.q.body :+ r.q.head + val fakeCQ = ConjunctiveQuery(r.q.head, fakeBody) // we will just use the fakeBody below. + + // Generate the body of the query. 
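+      // e.g. for Q(x) :- R(x,f), fakeBody is [R(x,f), Q(x)], so the head atom
+      // is aliased R1 (index r.q.body.length) in the SQL generated below.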
+ val qs = new QuerySchema( r.q ) + val body_attributes = r.q.body.zipWithIndex flatMap { + // check if relName is a ground term, if so skip it. + // if not, generate the id column. + case (Atom(r,_),i) => + if(ss.isQueryTerm(r)) Some(s"R${i}._id") else None + } // we know have all variables in the body + + // Construct the various terms for the select and group by + val factor_id_select = Some("0 as _fid") + val factor_id = Some("_fid") + val head_id = if (ss.isQueryTerm(r.q.head.name)) Some(s"R${ r.q.body.length }._id") else None + + // does array agg need a tuple constructor? + val array_agg = if (body_attributes.length > 0) Some(s"array_agg(${ body_attributes.mkString(", ") })") else None + + val uw_str = + r.weights match { + case None => None + case Some(w) => + val uw = w map { + case(s) => + s"R${ qs.getBodyIndex(s) }.${ ss.resolveName( qs.getVar(s) ) }" + } + Some(s"${ uw.mkString(", ") }") + } + + val select_str = (List(factor_id_select, head_id, array_agg, uw_str) flatMap { case(u) => u }).mkString(", ") + val group_str = (List(factor_id, head_id, uw_str) flatMap { case(u) => (u) }).mkString(", ") + + val u = s""" + SELECT ${select_str} + ${ generateSQLBody(ss, fakeCQ) } + GROUP BY ${group_str} """ + // if no random variables in the query then don't emit a factor term + if (ss.isQueryTerm(r.q.head.name) || body_attributes.length > 0) Some(u) else None + } + (node_query, edge_query) + } + + /* + T(base_attr); + S(a1,a2) + Q(x) :- S(x,y),T(y) + Should generate. + + Node query: + CREATE TABLE Q AS + SELECT 0 as _id, R0.a1 + FROM S as R0,T as R1 + WHERE R0.a2 = R1.base_attr + + Edge Query (if S and T are probabilistic) + SELECT Q._id, array_agg( (S._id, T_.id) ) + FROM Q as R0,S as R1,T as R2 + WHERE S.y = T.base_attr AND + Q.x = S.x AND Q.z = S.z + + Factor Function: OR + + ======= + R(x,y) (assume non probabilistic) + + Q(x) :- R(x,f) weight=f + + Node Query: + CREATE TABLE Q AS + SELECT DISTINCT 0 as _id, x FROM R + + Edge Query: + SELECT 0 as _fid, Q.id, R.f as w + FROM Q, R + WHERE Q.x = R.x + + ======= + + */ + def main(args: Array[String]) = { + val q = parse(statements, "S(a1,a2); R(pk,f); Q(x) :- R(x,f) weight=f; Q(x) :- S(x,y),T(y); T(base_attr)!; R(x,y) :- U(x,y); S(x,y) :- R(x,y);") + val schema = new StatementSchema( q.get ) + + val queries = q.get flatMap { + case _ : SchemaElement => None + case w : WeightedRule => + Some(weightedRule(schema,w)) + } + queries.foreach { case(query) => println(query) } + } +} diff --git a/test.scala b/test.scala new file mode 100644 index 000000000..e8917dff9 --- /dev/null +++ b/test.scala @@ -0,0 +1,55 @@ +import scala.util.parsing.combinator._ +import scala.collection.immutable.HashMap + +trait Statement +case class Variable(varName : String, relName: String, index : Int ) +case class Atom(name : String, terms : List[Variable]) +case class ConjunctiveQuery(head: Atom, body: List[Atom]) + +case class WeightedRule(q : ConjunctiveQuery, weights : Option[List[String]]) extends Statement // Weighted rule +case class SchemaElement( a : Atom , query : Boolean ) extends Statement // atom and whether this is a query relation. + + +// Parser +class ConjunctiveQueryParser extends JavaTokenParsers { + def stringliteral: Parser[String] = """[\w]+""".r + + // relation names and columns are just strings. 
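+  // Note: this trimmed-down stringliteral is just [\w]+, so bare names like
+  // "R" parse here, but dotted names such as p1.text do not (unlike the
+  // fuller grammar in ConjunctiveQueryParser.scala).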
+ def relation_name: Parser[String] = stringliteral ^^ {case (x) => x} + def col : Parser[String] = stringliteral ^^ { case(x) => x } + + def atom: Parser[Atom] = relation_name ~ "(" ~ rep1sep(col, ",") ~ ")" ^^ { + case (r ~ "(" ~ cols ~ ")") => { + val vars = cols.zipWithIndex map { case(name,i) => Variable(name, r, i) } + Atom(r,vars) + } + } + + def query : Parser[ConjunctiveQuery] = atom ~ ":-" ~ rep1sep(atom, ",") ^^ { + case (headatom ~ ":-" ~ bodyatoms) => ConjunctiveQuery(headatom, bodyatoms.toList) + } + + def schema_element : Parser[SchemaElement] = atom ~ opt("!") ^^ { + case (a ~ None) => SchemaElement(a,true) + case (a ~ Some(_)) => SchemaElement(a,false) + } + + + def rule : Parser[WeightedRule] = query ~ opt( "weight=" ~ rep1sep(col, ",")) ^^ { + case (q ~ Some("weight=" ~ weights)) => WeightedRule(q,Some(weights)) + case (q ~ None) => WeightedRule(q,None) + } + + // rules or schema elements in aribitrary order + def statement : Parser[Statement] = (rule | schema_element) ^^ {case(x) => x} + + def statements : Parser[List[Statement]] = rep1sep(statement, ";") ^^ { case(x) => x } +} + +object ConjunctiveQueryParser extends ConjunctiveQueryParser { + + def main(args: Array[String]) = { + val q = parse(statements, args(0)) + println(q.get) + } +} From ccbc9881f74e5f1d29c1701368ac9481ecb99a2e Mon Sep 17 00:00:00 2001 From: feiranwang Date: Wed, 8 Apr 2015 22:21:09 -0700 Subject: [PATCH 002/347] minor change --- test.scala => Test.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename test.scala => Test.scala (96%) diff --git a/test.scala b/Test.scala similarity index 96% rename from test.scala rename to Test.scala index e8917dff9..facf6831b 100644 --- a/test.scala +++ b/Test.scala @@ -46,7 +46,7 @@ class ConjunctiveQueryParser extends JavaTokenParsers { def statements : Parser[List[Statement]] = rep1sep(statement, ";") ^^ { case(x) => x } } -object ConjunctiveQueryParser extends ConjunctiveQueryParser { +object Test extends ConjunctiveQueryParser { def main(args: Array[String]) = { val q = parse(statements, args(0)) From 779244f1b8feea8b7a5c2f6596a1e002b5c04dae Mon Sep 17 00:00:00 2001 From: senwu Date: Fri, 10 Apr 2015 15:55:13 -0700 Subject: [PATCH 003/347] add extraction rule --- Test.scala | 212 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 204 insertions(+), 8 deletions(-) diff --git a/Test.scala b/Test.scala index facf6831b..eec636334 100644 --- a/Test.scala +++ b/Test.scala @@ -6,13 +6,18 @@ case class Variable(varName : String, relName: String, index : Int ) case class Atom(name : String, terms : List[Variable]) case class ConjunctiveQuery(head: Atom, body: List[Atom]) -case class WeightedRule(q : ConjunctiveQuery, weights : Option[List[String]]) extends Statement // Weighted rule case class SchemaElement( a : Atom , query : Boolean ) extends Statement // atom and whether this is a query relation. +case class ExtractionRule(q : ConjunctiveQuery, udfs : Option[String]) extends Statement // Extraction rule +case class InferenceRule(q : ConjunctiveQuery, weights : Option[List[String]]) extends Statement // Inference rule // Parser class ConjunctiveQueryParser extends JavaTokenParsers { - def stringliteral: Parser[String] = """[\w]+""".r + + // Odd definitions, but we'll keep them. 
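+  // stringliteral1 takes single-quoted literals; stringliteral2 takes bare
+  // identifiers, dots included, so terms like p1.mention_id in the test
+  // input below parse as single column names.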
+ def stringliteral1: Parser[String] = ("'"+"""([^'\p{Cntrl}\\]|\\[\\"'bfnrt]|\\u[a-fA-F0-9]{4})*"""+"'").r ^^ {case (x) => x} + def stringliteral2: Parser[String] = """[a-zA-Z_0-9\.]*""".r ^^ {case (x) => x} + def stringliteral: Parser[String] = (stringliteral1 | stringliteral2) ^^ {case (x) => x} // relation names and columns are just strings. def relation_name: Parser[String] = stringliteral ^^ {case (x) => x} @@ -25,6 +30,8 @@ class ConjunctiveQueryParser extends JavaTokenParsers { } } + def udf : Parser[String] = stringliteral ^^ {case (x) => x} + def query : Parser[ConjunctiveQuery] = atom ~ ":-" ~ rep1sep(atom, ",") ^^ { case (headatom ~ ":-" ~ bodyatoms) => ConjunctiveQuery(headatom, bodyatoms.toList) } @@ -34,22 +41,211 @@ class ConjunctiveQueryParser extends JavaTokenParsers { case (a ~ Some(_)) => SchemaElement(a,false) } + def extraction_rule : Parser[ExtractionRule] = query ~ opt( "udf=" ~ udf) ^^ { + case (q ~ Some("udf=" ~ udfs)) => ExtractionRule(q,Some(udfs)) + case (q ~ None) => ExtractionRule(q,None) + } - def rule : Parser[WeightedRule] = query ~ opt( "weight=" ~ rep1sep(col, ",")) ^^ { - case (q ~ Some("weight=" ~ weights)) => WeightedRule(q,Some(weights)) - case (q ~ None) => WeightedRule(q,None) + def inference_rule : Parser[InferenceRule] = query ~ opt( "weight=" ~ rep1sep(col, ",")) ^^ { + case (q ~ Some("weight=" ~ weights)) => InferenceRule(q,Some(weights)) + case (q ~ None) => InferenceRule(q,None) } + // rules or schema elements in aribitrary order - def statement : Parser[Statement] = (rule | schema_element) ^^ {case(x) => x} + def statement : Parser[Statement] = (extraction_rule | inference_rule | schema_element) ^^ {case(x) => x} def statements : Parser[List[Statement]] = rep1sep(statement, ";") ^^ { case(x) => x } } +// This handles the schema statements. +// It can tell you if a predicate is a "query" predicate or a "ground prediate" +// and it resolves Variables their correct and true name in the schema, i.e. R(x,y) then x could be Attribute1 declared. +class StatementSchema( statements : List[Statement] ) { + // TODO: refactor the schema into a class that constructs and + // manages these maps. Also it should have appropriate + // abstractions and error handling for missing values. + // ** Start refactor. + var schema : Map[ Tuple2[String,Int], String ] = new HashMap[ Tuple2[String,Int], String ]() + + var ground_relations : Map[ String, Boolean ] = new HashMap[ String, Boolean ]() + + def init() = { + // generate the statements. + statements.foreach { + case SchemaElement(Atom(r, terms),query) => + terms.foreach { + case Variable(n,r,i) => + schema += { (r,i) -> n } + ground_relations += { r -> query } // record whether a query or a ground term. + } + case ExtractionRule(_,_) => () + case InferenceRule(_,_) => () + } + println(schema) + println(ground_relations) + } + + init() + + // Given a variable, resolve it. TODO: This should give a warning, + // if we encouter a variable that is not in this map, then something + // odd has happened. + def resolveName( v : Variable ) : String = { + v match { case Variable(v,relName,i) => + if(schema contains (relName,i)) { + schema(relName,i) + } else { + return v // I do not like this default, as it allows some errors. TOOD: MAKE MORE PRECISE! + } + } + } + + // The default is query term. 
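+  // e.g. for a hypothetical schema "T(base_attr)!; S(a1,a2)" this gives
+  // isQueryTerm("T") == true, isQueryTerm("S") == false, and true for any
+  // undeclared relation.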
+ def isQueryTerm( relName : String ): Boolean = { + if( ground_relations contains relName ) !ground_relations(relName) else true + } +} + +// This is responsible for schema elements within a given query, e.g., +// what is the canonical version of x? (i.e., the first time it is +// mentioned in the body. This is useful to translate to SQL (join +// conditions, select, etc.) +class QuerySchema(q : ConjunctiveQuery) { + var query_schema = new HashMap[ String, Tuple2[Int,Variable] ]() + + // maps each variable name to a canonical version of itself (first occurence in body in left-to-right order) + // index is the index of the subgoal/atom this variable is found in the body. + // variable is the complete Variable type for the found variable. + def generateCanonicalVar() = { + q.body.zipWithIndex.foreach { + case (Atom(relName,terms),index) => { + terms.foreach { + case Variable(v, r, i) => + if( ! (query_schema contains v) ) + query_schema += { v -> (index, Variable(v,r,i) ) } + } + } + } + } + generateCanonicalVar() // initialize + + // accessors + def getBodyIndex( varName : String ) : Int = { query_schema(varName)._1 } + def getVar(varName : String ) : Variable = { query_schema(varName)._2 } + +} object Test extends ConjunctiveQueryParser { + // This is generic code that generates the FROM with positional aliasing R0, R1, etc. + // and the corresponding WHERE clause (equating all variables) + def generateSQLBody(ss : StatementSchema, z : ConjunctiveQuery) : String = { + val bodyNames = ( z.body.zipWithIndex map { case(x,i) => s"${x.name} as R${i}"}).mkString(",") + // Simple logic for the where clause, first find every first occurence of a + // and stick it in a map. + val qs = new QuerySchema(z) + + val whereClause = z.body.zipWithIndex flatMap { + case (Atom(relName, terms),body_index) => + terms flatMap { + case Variable(varName, relName, index) => + val canonical_body_index = qs.getBodyIndex(varName) + + if (canonical_body_index != body_index) { + val real_attr_name1 = ss.resolveName( Variable(varName, relName, index) ) + val real_attr_name2 = ss.resolveName( qs.getVar(varName)) + Some(s"R${ body_index }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") + } else { None } + } + } + val whereClauseStr = whereClause match { + case Nil => "" + case _ => s"""WHERE ${whereClause.mkString(" AND ")}""" + } + + s"""FROM ${ bodyNames } ${ whereClauseStr }""" + } + // generate the node portion (V) of the factor graph + def nodeRule(ss : StatementSchema, z : ConjunctiveQuery) : String = { + val headTerms = z.head.terms map { + case Variable(v,r,i) => s"R${i}.${ss.resolveName(Variable(v,r,i)) }" + } + val headTermsStr = ( "0 as _id" :: headTerms ).mkString(",") + s"""CREATE TABLE ${ z.head.name } AS + SELECT DISTINCT ${ headTermsStr } + ${ generateSQLBody(ss,z) } + """ + } + + // Generate extraction rule part for deepdive + def extractionRule( ss: StatementSchema, r : ExtractionRule ) : String = { + println(r.udfs.get) + // Generate the body of the query. 
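+    // Sketch of the input query built here, e.g. for the rule
+    //   people_mentions(sentence_id, words, ner_tags) :- sentences(...):
+    //   SELECT R0.sentence_id AS "sentences.R0.sentence_id", ... FROM sentences as R0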
+ val qs = new QuerySchema( r.q ) + // variable columns + val variableCols = r.q.head.terms flatMap { + case(Variable(v,rr,i)) => { + val index = qs.getBodyIndex(v) + val name = ss.resolveName(qs.getVar(v)) + val relation = r.q.body(index).name + Some(s"""R${index}.${name} AS "${relation}.R${index}.${name}" """) + } + } + + val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None + + val selectStr = (List(variableColsStr) flatMap (u => u)).mkString(", ") + + println(s"${selectStr}") + val inputQuery = s""" + SELECT ${selectStr} + ${ generateSQLBody(ss, r.q) }""" + + + val extractor = s""" + e_${r.udfs.get} { + input : \"\"\" ${inputQuery} + \"\"\" + output_relation : \"${r.q.head.name}\" + udf : \"/udf/${r.udfs.get}.py\" + style : \"tsv_extractor\" + } + """ + extractor + } + def main(args: Array[String]) = { - val q = parse(statements, args(0)) - println(q.get) + var input = """ + articles(article_id, text); + sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id); + people_mentions(sentence_id, start_position, length, text, mention_id); + has_spouse(person1_id, person2_id, sentence_id, description, is_true, relation_id, id); + has_spouse_features(relation_id, feature); + people_mentions(sentence_id, words, ner_tags):- + sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) + udf=ext_people; + has_spouse(sentence_id, p1.mention_id, p1.text, p2.mention_id, p2.text):- + people_mentions(sentence_id, p1.start_position, p1.length, p1.text, p1.mention_id), + people_mentions(sentence_id, p2.start_position, p2.length, p2.text, p2.mention_id) + udf=ext_has_spouse; + has_spouse_features(words, relation_id, p1.start_position, p1.length, p2.start_position, p2.length):- + sentences(s.document_id, s.sentence, words, s.lemma, s.pos_tags, s.dependencies, s.ner_tags, s.sentence_offset, sentence_id), + has_spouse(person1_id, person2_id, sentence_id, h.description, h.is_true, relation_id, h.id), + people_mentions(sentence_id, p1.start_position, p1.length, p1.text, person1_id), + people_mentions(sentence_id, p2.start_position, p2.length, p2.text, person2_id) + udf=ext_has_spouse_features; + """ + // val q = parse(statements, args(0)) + val q = parse(statements, input) + val schema = new StatementSchema( q.get ) + val queries = q.get flatMap { + case _ : SchemaElement => None + case e : ExtractionRule => + Some(extractionRule(schema, e)) + // case w : InferenceRule => + // Some(inferenceRule(schema,w)) + } + for (query <- queries) + println(query) } } From 49a5386ac79e277680b9d7b01dee248648b29a06 Mon Sep 17 00:00:00 2001 From: senwu Date: Fri, 10 Apr 2015 16:14:55 -0700 Subject: [PATCH 004/347] MERGEDgit commit -m ! --- Test.scala | 250 ++++++++++++++++++++++++++++++++++++++++++++++++----- run.sh | 7 ++ 2 files changed, 235 insertions(+), 22 deletions(-) create mode 100644 run.sh diff --git a/Test.scala b/Test.scala index eec636334..43cbd718b 100644 --- a/Test.scala +++ b/Test.scala @@ -1,19 +1,82 @@ import scala.util.parsing.combinator._ import scala.collection.immutable.HashMap +/* + This file parses an extended form of datalog like sugar. + It allows schema declarations + + SomeOther(realname, otherattribute) + + And queries + + Q(x,y) :- R(x,y), SomeOther(y, z) + + Using the schema can SQLized as + + SELECT R1.x,R2.y + FROM R as R1,SomeOther as R2 + WHERE R1.y = R2.realname + + We translate by introducing aliases R1, R2 , etc. 
to deal with + repeated symbols. + + TODO: + ================= + + Our schema needs to know whether a symbol is this a query table (and + so should contain an _id) field or is a regular table from the + user. + + If a head term is not mentioned in the schema, its assumed it is a + query table that this code must create. + + If one wants to explicilty mention a query table in the schema, they + do so with a trailing exclamation point as follows + + Q(x,y)!; + +Consider + + Q(x) :- R(x,f) weight=f + + ... R is likely *not* a variable table ... we record its translation below. + + In contrast, Q(x) :- R(x),S(x) ... coule be treated as variable tables. Hence, the schema has: + + R(x,f) // regular table + R(x,f)! // variable table. + + */ + +/* TODOs: + + Refactor schema object and introduce error checking (unsafe queries, + unordered attributes, etc.). +*/ + +// *************************************** +// * The union types for for the parser. * +// *************************************** trait Statement case class Variable(varName : String, relName: String, index : Int ) case class Atom(name : String, terms : List[Variable]) case class ConjunctiveQuery(head: Atom, body: List[Atom]) +sealed trait FactorWeight { + def variables : List[String] +} +case class KnownFactorWeight(value: Double) extends FactorWeight { + def variables = Nil +} +case class UnknownFactorWeight(variables: List[String]) extends FactorWeight + case class SchemaElement( a : Atom , query : Boolean ) extends Statement // atom and whether this is a query relation. case class ExtractionRule(q : ConjunctiveQuery, udfs : Option[String]) extends Statement // Extraction rule -case class InferenceRule(q : ConjunctiveQuery, weights : Option[List[String]]) extends Statement // Inference rule +case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight) extends Statement // Weighted rule // Parser class ConjunctiveQueryParser extends JavaTokenParsers { - // Odd definitions, but we'll keep them. 
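+  // stringliteral2 doubles as the token for weight variables: "weight=f"
+  // parses f via col, while a numeric weight such as 0.5 is matched first
+  // by constantWeight further below.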
def stringliteral1: Parser[String] = ("'"+"""([^'\p{Cntrl}\\]|\\[\\"'bfnrt]|\\u[a-fA-F0-9]{4})*"""+"'").r ^^ {case (x) => x} def stringliteral2: Parser[String] = """[a-zA-Z_0-9\.]*""".r ^^ {case (x) => x} @@ -36,24 +99,29 @@ class ConjunctiveQueryParser extends JavaTokenParsers { case (headatom ~ ":-" ~ bodyatoms) => ConjunctiveQuery(headatom, bodyatoms.toList) } - def schema_element : Parser[SchemaElement] = atom ~ opt("!") ^^ { + def schemaElement : Parser[SchemaElement] = atom ~ opt("!") ^^ { case (a ~ None) => SchemaElement(a,true) case (a ~ Some(_)) => SchemaElement(a,false) } - def extraction_rule : Parser[ExtractionRule] = query ~ opt( "udf=" ~ udf) ^^ { - case (q ~ Some("udf=" ~ udfs)) => ExtractionRule(q,Some(udfs)) - case (q ~ None) => ExtractionRule(q,None) + def extractionRule : Parser[ExtractionRule] = query ~ "udf" ~ "=" ~ opt(udf) ^^ { + case (q ~ "udf" ~ "=" ~ Some(udfs)) => ExtractionRule(q,Some(udfs)) + case (q ~ "udf" ~ "=" ~ None) => ExtractionRule(q,None) } - def inference_rule : Parser[InferenceRule] = query ~ opt( "weight=" ~ rep1sep(col, ",")) ^^ { - case (q ~ Some("weight=" ~ weights)) => InferenceRule(q,Some(weights)) - case (q ~ None) => InferenceRule(q,None) + def constantWeight = "weight" ~> "=" ~> """-?[\d\.]+""".r ^^ { x => KnownFactorWeight(x.toDouble) } + def unknwonWeight = "weight" ~> "=" ~> opt(rep1sep(col, ",")) ^^ { + case Some(varList) => UnknownFactorWeight(varList.toList) + case _ => UnknownFactorWeight(List()) } + def factorWeight = constantWeight | unknwonWeight + def inferenceRule : Parser[InferenceRule] = query ~ factorWeight ^^ { + case (q ~ weight) => InferenceRule(q, weight) + } // rules or schema elements in aribitrary order - def statement : Parser[Statement] = (extraction_rule | inference_rule | schema_element) ^^ {case(x) => x} + def statement : Parser[Statement] = (extractionRule | inferenceRule | schemaElement) ^^ {case(x) => x} def statements : Parser[List[Statement]] = rep1sep(statement, ";") ^^ { case(x) => x } } @@ -140,7 +208,7 @@ object Test extends ConjunctiveQueryParser { // This is generic code that generates the FROM with positional aliasing R0, R1, etc. // and the corresponding WHERE clause (equating all variables) def generateSQLBody(ss : StatementSchema, z : ConjunctiveQuery) : String = { - val bodyNames = ( z.body.zipWithIndex map { case(x,i) => s"${x.name} as R${i}"}).mkString(",") + val bodyNames = ( z.body.zipWithIndex map { case(x,i) => s"${x.name} R${i}"}).mkString(", ") // Simple logic for the where clause, first find every first occurence of a // and stick it in a map. 
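+    // e.g. Q(x,y) :- R(x,y), SomeOther(y, z), with only SomeOther declared
+    // (as in the header comment), produces:
+    //   FROM R R0, SomeOther R1 WHERE R1.realname = R0.y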
val qs = new QuerySchema(z) @@ -163,7 +231,8 @@ object Test extends ConjunctiveQueryParser { case _ => s"""WHERE ${whereClause.mkString(" AND ")}""" } - s"""FROM ${ bodyNames } ${ whereClauseStr }""" + s"""FROM ${ bodyNames } + ${ whereClauseStr }""" } // generate the node portion (V) of the factor graph def nodeRule(ss : StatementSchema, z : ConjunctiveQuery) : String = { @@ -211,11 +280,149 @@ object Test extends ConjunctiveQueryParser { style : \"tsv_extractor\" } """ + println(extractor) extractor } + + // resolve a column name with alias + def resolveColumn(s: String, ss: StatementSchema, qs: QuerySchema, r : InferenceRule, + alias: Boolean) : Option[String] = { + val index = qs.getBodyIndex(s) + val name = ss.resolveName(qs.getVar(s)) + val relation = r.q.body(index).name + if (alias) + Some(s"""R${index}.${name} AS "${relation}.R${index}.${name}" """) + else + Some(s"${relation}.R${index}.${name}") + } + + // generate inference rule part for deepdive + def inferenceRule(ss : StatementSchema, r : InferenceRule) : String = { + // Generate the body of the query. + val qs = new QuerySchema( r.q ) + // check if relName is a ground term, if so skip it. + // if not, generate the id column. + val variableIds = r.q.body.zipWithIndex flatMap { + case (Atom(r,_),i) => + if(ss.isQueryTerm(r)) Some(s"""R${i}.id AS "${r}.R${i}.id" """) else None + } // we know have all variables in the body + + val variableIdsStr = if (variableIds.length > 0) Some(variableIds.mkString(", ")) else None + + // variable columns + val variableCols = r.q.head.terms flatMap { + case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r, true) + } + val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None + + // weight string + val uwStr = r.weights match { + case KnownFactorWeight(x) => None + case UnknownFactorWeight(w) => Some(w.flatMap(s => resolveColumn(s, ss, qs, r, true)).mkString(", ")) + } + + val selectStr = (List(variableIdsStr, variableColsStr, uwStr) flatMap (u => u)).mkString(", ") + + // factor input query + val inputQuery = s""" + SELECT ${selectStr} + ${ generateSQLBody(ss, r.q) }""" + + // variable columns using alias (for factor function) + val variableColsAlias = r.q.head.terms flatMap { + case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r, false) + } + val variableColsAliasStr = if (variableColsAlias.length > 0) Some(variableColsAlias.mkString(", ")) else None + + // factor function + val func = s"""Imply(${variableColsAliasStr.get})""" + + // weight + val weight = r.weights match { + case KnownFactorWeight(x) => s"${x}" + case UnknownFactorWeight(w) => { + s"""?(${w.flatMap(s => resolveColumn(s, ss, qs, r, false)).mkString(", ")})""" + } + } + + val rule = s""" + factor_${r.q.head.name} { + input_query: \"\"\"${inputQuery}\"\"\" + function: "${func}" + weight: "${weight}" + } + """ + println(rule) + + return inputQuery + } + /* + T(base_attr); + S(a1,a2) + Q(x) :- S(x,y),T(y) + Should generate. 
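+  (Aliases are positional: S becomes R0 and T becomes R1, and x is first
+   bound in S at attribute a1, hence R0.a1 in the select list.)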
+ + Node query: + CREATE TABLE Q AS + SELECT 0 as _id, R0.a1 + FROM S as R0,T as R1 + WHERE R0.a2 = R1.base_attr + + Edge Query (if S and T are probabilistic) + SELECT Q._id, array_agg( (S._id, T_.id) ) + FROM Q as R0,S as R1,T as R2 + WHERE S.y = T.base_attr AND + Q.x = S.x AND Q.z = S.z + + Factor Function: OR + + ======= + R(x,y) (assume non probabilistic) + + Q(x) :- R(x,f) weight=f + + Node Query: + CREATE TABLE Q AS + SELECT DISTINCT 0 as _id, x FROM R + + Edge Query: + SELECT 0 as _fid, Q.id, R.f as w + FROM Q, R + WHERE Q.x = R.x + + ======= + + */ def main(args: Array[String]) = { - var input = """ + val test1 = """ + S(a1,a2); + R(pk,f)!; + Q(x) :- R(x,f) weight=f; + Q2(x) :- R(x, f), S(x, y) weight = f""" + val test2 = """ + S(a1,a2); + R(pk,f); + Q(x) :- R(x,f) weight=f; + Q(x) :- S(x,y),T(y); + T(base_attr)!; + R(y,x) :- U(x,y); + S(x,y) :- R(x,y);""" + val test3 = """ + has_spouse(person1_id, person2_id, sentence_id, description, is_true, relation_id, id)!; + has_spouse_features(relation_id, feature); + + f_has_spouse_features(x) :- + has_spouse(a, b, c, d, x, y, e), + has_spouse_features(y, f) + weight = f; + + f_has_spouse_symmetry(x, y) :- + has_spouse(a1, a2, a3, a4, x, a6, a7), + has_spouse(a2, a1, b3, b4, y, b6, b7) + weight = 1; + """ + val test4 = """ articles(article_id, text); sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id); people_mentions(sentence_id, start_position, length, text, mention_id); @@ -235,17 +442,16 @@ object Test extends ConjunctiveQueryParser { people_mentions(sentence_id, p2.start_position, p2.length, p2.text, person2_id) udf=ext_has_spouse_features; """ - // val q = parse(statements, args(0)) - val q = parse(statements, input) + val q = parse(statements, test4) val schema = new StatementSchema( q.get ) + + println(test4) + println() + val queries = q.get flatMap { - case _ : SchemaElement => None - case e : ExtractionRule => - Some(extractionRule(schema, e)) - // case w : InferenceRule => - // Some(inferenceRule(schema,w)) + case _ : SchemaElement => None + case e : ExtractionRule => Some(extractionRule(schema, e)) + case w : InferenceRule => Some(inferenceRule(schema, w)) } - for (query <- queries) - println(query) } } diff --git a/run.sh b/run.sh new file mode 100644 index 000000000..ab1252885 --- /dev/null +++ b/run.sh @@ -0,0 +1,7 @@ +# scalac ConjunctiveQueryParser.scala +# scala ConjunctiveQueryParser + + +scalac Test.scala +scala Test + From c6cfcd99621a1ae5fe4e489f800df5d14f2caf5f Mon Sep 17 00:00:00 2001 From: feiranwang Date: Fri, 10 Apr 2015 16:25:55 -0700 Subject: [PATCH 005/347] refactoring --- Test.scala | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/Test.scala b/Test.scala index 43cbd718b..68c0641df 100644 --- a/Test.scala +++ b/Test.scala @@ -248,17 +248,11 @@ object Test extends ConjunctiveQueryParser { // Generate extraction rule part for deepdive def extractionRule( ss: StatementSchema, r : ExtractionRule ) : String = { - println(r.udfs.get) // Generate the body of the query. 
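+    // resolveColumn (below) emits either form, e.g. for words bound in
+    // sentences at R0: alias=true gives R0.words AS "sentences.R0.words",
+    // alias=false gives sentences.R0.words.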
val qs = new QuerySchema( r.q ) // variable columns val variableCols = r.q.head.terms flatMap { - case(Variable(v,rr,i)) => { - val index = qs.getBodyIndex(v) - val name = ss.resolveName(qs.getVar(v)) - val relation = r.q.body(index).name - Some(s"""R${index}.${name} AS "${relation}.R${index}.${name}" """) - } + case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r.q, true) } val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None @@ -286,11 +280,11 @@ object Test extends ConjunctiveQueryParser { // resolve a column name with alias - def resolveColumn(s: String, ss: StatementSchema, qs: QuerySchema, r : InferenceRule, + def resolveColumn(s: String, ss: StatementSchema, qs: QuerySchema, q : ConjunctiveQuery, alias: Boolean) : Option[String] = { val index = qs.getBodyIndex(s) val name = ss.resolveName(qs.getVar(s)) - val relation = r.q.body(index).name + val relation = q.body(index).name if (alias) Some(s"""R${index}.${name} AS "${relation}.R${index}.${name}" """) else @@ -312,14 +306,14 @@ object Test extends ConjunctiveQueryParser { // variable columns val variableCols = r.q.head.terms flatMap { - case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r, true) + case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r.q, true) } val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None // weight string val uwStr = r.weights match { case KnownFactorWeight(x) => None - case UnknownFactorWeight(w) => Some(w.flatMap(s => resolveColumn(s, ss, qs, r, true)).mkString(", ")) + case UnknownFactorWeight(w) => Some(w.flatMap(s => resolveColumn(s, ss, qs, r.q, true)).mkString(", ")) } val selectStr = (List(variableIdsStr, variableColsStr, uwStr) flatMap (u => u)).mkString(", ") @@ -331,7 +325,7 @@ object Test extends ConjunctiveQueryParser { // variable columns using alias (for factor function) val variableColsAlias = r.q.head.terms flatMap { - case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r, false) + case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r.q, false) } val variableColsAliasStr = if (variableColsAlias.length > 0) Some(variableColsAlias.mkString(", ")) else None @@ -342,7 +336,7 @@ object Test extends ConjunctiveQueryParser { val weight = r.weights match { case KnownFactorWeight(x) => s"${x}" case UnknownFactorWeight(w) => { - s"""?(${w.flatMap(s => resolveColumn(s, ss, qs, r, false)).mkString(", ")})""" + s"""?(${w.flatMap(s => resolveColumn(s, ss, qs, r.q, false)).mkString(", ")})""" } } From 15b9a198c84e9d8909c2b2edf1dd46612483fd98 Mon Sep 17 00:00:00 2001 From: feiranwang Date: Fri, 10 Apr 2015 17:12:33 -0700 Subject: [PATCH 006/347] resolve variable schema --- Test.scala | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/Test.scala b/Test.scala index 68c0641df..f75ec5728 100644 --- a/Test.scala +++ b/Test.scala @@ -246,6 +246,29 @@ object Test extends ConjunctiveQueryParser { """ } + // generate variable schema statements + def variableSchema(statements : List[Statement], ss: StatementSchema) : String = { + var schema = Set[String]() + // generate the statements. 
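+    // e.g. the test3 rule f_has_spouse_features(x) :- has_spouse(a,b,c,d,x,y,e), ...
+    // binds x to has_spouse at position 4 (is_true), so this emits
+    //   has_spouse.is_true : Boolean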
+ statements.foreach { + case InferenceRule(q, weights) => + val qs = new QuerySchema(q) + q.head.terms.foreach { + case Variable(n,r,i) => { + println(n) + val index = qs.getBodyIndex(n) + val name = ss.resolveName(qs.getVar(n)) + val relation = q.body(index).name + schema += s"${relation}.${name} : Boolean" + } + } + case _ => () + } + val ddSchema = schema.mkString("\n") + println(ddSchema) + ddSchema + } + // Generate extraction rule part for deepdive def extractionRule( ss: StatementSchema, r : ExtractionRule ) : String = { // Generate the body of the query. @@ -436,16 +459,14 @@ object Test extends ConjunctiveQueryParser { people_mentions(sentence_id, p2.start_position, p2.length, p2.text, person2_id) udf=ext_has_spouse_features; """ - val q = parse(statements, test4) + val q = parse(statements, test3) val schema = new StatementSchema( q.get ) + val variables = variableSchema(q.get, schema) - println(test4) - println() - - val queries = q.get flatMap { - case _ : SchemaElement => None - case e : ExtractionRule => Some(extractionRule(schema, e)) - case w : InferenceRule => Some(inferenceRule(schema, w)) - } + // val queries = q.get flatMap { + // case _ : SchemaElement => None + // case e : ExtractionRule => Some(extractionRule(schema, e)) + // case w : InferenceRule => Some(inferenceRule(schema, w)) + // } } } From 7ac3cb11e424cc5873da23eb033ed30024c2f136 Mon Sep 17 00:00:00 2001 From: senwu Date: Fri, 10 Apr 2015 17:35:05 -0700 Subject: [PATCH 007/347] add dependency in extraction rule --- Test.scala | 46 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/Test.scala b/Test.scala index 68c0641df..7405c03f9 100644 --- a/Test.scala +++ b/Test.scala @@ -58,7 +58,7 @@ Consider // * The union types for for the parser. * // *************************************** trait Statement -case class Variable(varName : String, relName: String, index : Int ) +case class Variable(varName : String, relName : String, index : Int ) case class Atom(name : String, terms : List[Variable]) case class ConjunctiveQuery(head: Atom, body: List[Atom]) @@ -247,7 +247,7 @@ object Test extends ConjunctiveQueryParser { } // Generate extraction rule part for deepdive - def extractionRule( ss: StatementSchema, r : ExtractionRule ) : String = { + def extractionRule( ss: StatementSchema, em: List[(Int, String)], r : ExtractionRule, index : Int ) : String = { // Generate the body of the query. 
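+    // (em pairs each statement's index with its output relation; it is
+    //  scanned below to fill the extractor's dependencies field with
+    //  extraction_rule_<i> entries.)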
val qs = new QuerySchema( r.q ) // variable columns @@ -259,19 +259,27 @@ object Test extends ConjunctiveQueryParser { val selectStr = (List(variableColsStr) flatMap (u => u)).mkString(", ") - println(s"${selectStr}") + // println(s"${selectStr}") val inputQuery = s""" SELECT ${selectStr} ${ generateSQLBody(ss, r.q) }""" + val dependencyRelation = r.q.body map { case(x) => s"${x.name}"} + var dependencies = List[String]() + for (e <- em) { + if (dependencyRelation contains e._2) + dependencies ::= s""" "extraction_rule_${e._1}" """ + } + val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" val extractor = s""" - e_${r.udfs.get} { + extraction_rule_${index} { input : \"\"\" ${inputQuery} \"\"\" output_relation : \"${r.q.head.name}\" udf : \"/udf/${r.udfs.get}.py\" style : \"tsv_extractor\" + ${dependencyStr} } """ println(extractor) @@ -439,13 +447,33 @@ object Test extends ConjunctiveQueryParser { val q = parse(statements, test4) val schema = new StatementSchema( q.get ) - println(test4) - println() + // println(test4) + // println() - val queries = q.get flatMap { + val extracions = q.get flatMap { case _ : SchemaElement => None - case e : ExtractionRule => Some(extractionRule(schema, e)) - case w : InferenceRule => Some(inferenceRule(schema, w)) + case e : ExtractionRule => Some(e) + case w : InferenceRule => None + } + val extractionsWithIndex = extracions.zipWithIndex + val extractionMap = extractionsWithIndex map { + case (e) => (e._2, e._1.q.head.name) + } + // for (extractor <- extractionsWithIndex) { + // extractionMap += (extractor.get(1), extractor.get(0).q.head) + // } + + println(extractionMap) + + val queries = extractionsWithIndex flatMap { + case (e) => Some(extractionRule(schema, extractionMap, e._1, e._2)) } + // println(extractionsWithIndex) + // println(extracions) + // val queries = q.get flatMap { + // case _ : SchemaElement => None + // case e : ExtractionRule => Some(extractionRule(schema, extractionMap, e)) + // case w : InferenceRule => Some(inferenceRule(schema, w)) + // } } } From 2ea05d54401b8a8ba727ac86010bcb650820d59b Mon Sep 17 00:00:00 2001 From: feiranwang Date: Thu, 16 Apr 2015 15:24:55 -0700 Subject: [PATCH 008/347] try new semantics --- Test.scala | 106 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 67 insertions(+), 39 deletions(-) diff --git a/Test.scala b/Test.scala index 89aac39dd..b55956ea4 100644 --- a/Test.scala +++ b/Test.scala @@ -72,7 +72,7 @@ case class UnknownFactorWeight(variables: List[String]) extends FactorWeight case class SchemaElement( a : Atom , query : Boolean ) extends Statement // atom and whether this is a query relation. 
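+// InferenceRule now also records a supervision column: rules take the form
+//   head :- body weight=... label=<col>   (e.g. label = x in test3 below).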
case class ExtractionRule(q : ConjunctiveQuery, udfs : Option[String]) extends Statement // Extraction rule -case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight) extends Statement // Weighted rule +case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String) extends Statement // Weighted rule // Parser @@ -116,8 +116,10 @@ class ConjunctiveQueryParser extends JavaTokenParsers { } def factorWeight = constantWeight | unknwonWeight - def inferenceRule : Parser[InferenceRule] = query ~ factorWeight ^^ { - case (q ~ weight) => InferenceRule(q, weight) + def supervision = "label" ~> "=" ~> col + + def inferenceRule : Parser[InferenceRule] = query ~ factorWeight ~ supervision ^^ { + case (q ~ weight ~ supervision) => InferenceRule(q, weight, supervision) } // rules or schema elements in aribitrary order @@ -148,7 +150,7 @@ class StatementSchema( statements : List[Statement] ) { ground_relations += { r -> query } // record whether a query or a ground term. } case ExtractionRule(_,_) => () - case InferenceRule(_,_) => () + case InferenceRule(_,_,_) => () } println(schema) println(ground_relations) @@ -235,14 +237,17 @@ object Test extends ConjunctiveQueryParser { ${ whereClauseStr }""" } // generate the node portion (V) of the factor graph - def nodeRule(ss : StatementSchema, z : ConjunctiveQuery) : String = { - val headTerms = z.head.terms map { - case Variable(v,r,i) => s"R${i}.${ss.resolveName(Variable(v,r,i)) }" + def nodeRule(ss : StatementSchema, qs: QuerySchema, z : InferenceRule) : String = { + val headTerms = z.q.head.terms map { + case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" } - val headTermsStr = ( "0 as _id" :: headTerms ).mkString(",") - s"""CREATE TABLE ${ z.head.name } AS - SELECT DISTINCT ${ headTermsStr } - ${ generateSQLBody(ss,z) } + val index = qs.getBodyIndex(z.supervision) + val name = ss.resolveName(qs.getVar(z.supervision)) + val labelCol = s"R${index}.${name}" + val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") + s"""CREATE TABLE ${ z.q.head.name } AS + SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label + ${ generateSQLBody(ss,z.q) } """ } @@ -251,7 +256,7 @@ object Test extends ConjunctiveQueryParser { var schema = Set[String]() // generate the statements. statements.foreach { - case InferenceRule(q, weights) => + case InferenceRule(q, weights, supervision) => val qs = new QuerySchema(q) q.head.terms.foreach { case Variable(n,r,i) => { @@ -324,22 +329,36 @@ object Test extends ConjunctiveQueryParser { // generate inference rule part for deepdive def inferenceRule(ss : StatementSchema, r : InferenceRule) : String = { - // Generate the body of the query. + println("==================") val qs = new QuerySchema( r.q ) + + // node query + val node_query = if (ss.isQueryTerm(r.q.head.name)) Some(nodeRule(ss,qs,r)) else None + println(node_query) + + // edge query + val fakeBody = r.q.head +: r.q.body + println(fakeBody) + val fakeCQ = ConjunctiveQuery(r.q.head, fakeBody) // we will just use the fakeBody below. + + // Generate the body of the query. // check if relName is a ground term, if so skip it. // if not, generate the id column. 
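+    // (Superseded in this version: with the head prepended to fakeBody it is
+    //  always R0, so the query below selects R0.id and R0.label directly.)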
- val variableIds = r.q.body.zipWithIndex flatMap { - case (Atom(r,_),i) => - if(ss.isQueryTerm(r)) Some(s"""R${i}.id AS "${r}.R${i}.id" """) else None - } // we know have all variables in the body + // val variableIds = r.q.body.zipWithIndex flatMap { + // case (Atom(r,_),i) => + // if(ss.isQueryTerm(r)) Some(s"""R${i}.id AS "${r}.R${i}.id" """) else None + // } // we know have all variables in the body - val variableIdsStr = if (variableIds.length > 0) Some(variableIds.mkString(", ")) else None + // val variableIdsStr = if (variableIds.length > 0) Some(variableIds.mkString(", ")) else None // variable columns - val variableCols = r.q.head.terms flatMap { - case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r.q, true) - } - val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None + // val variableCols = r.q.head.terms flatMap { + // case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r.q, true) + // } + // val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None + + val variableIdsStr = Some(s"""R0.id AS "${r.q.head.name}.R0.id" """) + val variableColsStr = Some(s"""R0.label AS "${r.q.head.name}.R0.label" """) // weight string val uwStr = r.weights match { @@ -352,16 +371,16 @@ object Test extends ConjunctiveQueryParser { // factor input query val inputQuery = s""" SELECT ${selectStr} - ${ generateSQLBody(ss, r.q) }""" + ${ generateSQLBody(ss, fakeCQ) }""" // variable columns using alias (for factor function) - val variableColsAlias = r.q.head.terms flatMap { - case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r.q, false) - } - val variableColsAliasStr = if (variableColsAlias.length > 0) Some(variableColsAlias.mkString(", ")) else None + // val variableColsAlias = r.q.head.terms flatMap { + // case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r.q, false) + // } + // val variableColsAliasStr = if (variableColsAlias.length > 0) Some(variableColsAlias.mkString(", ")) else None // factor function - val func = s"""Imply(${variableColsAliasStr.get})""" + val func = s"""Imply(${r.q.head.name}.R0.label)""" // weight val weight = r.weights match { @@ -419,7 +438,7 @@ object Test extends ConjunctiveQueryParser { ======= */ - def main(args: Array[String]) = { + def main(args: Array[String]) { val test1 = """ S(a1,a2); R(pk,f)!; @@ -434,19 +453,22 @@ object Test extends ConjunctiveQueryParser { R(y,x) :- U(x,y); S(x,y) :- R(x,y);""" val test3 = """ - has_spouse(person1_id, person2_id, sentence_id, description, is_true, relation_id, id)!; + has_spouse(person1_id, person2_id, sentence_id, description, is_true, relation_id); has_spouse_features(relation_id, feature); + q(rid)!; - f_has_spouse_features(x) :- - has_spouse(a, b, c, d, x, y, e), + q(y) :- + has_spouse(a, b, c, d, x, y), has_spouse_features(y, f) - weight = f; - - f_has_spouse_symmetry(x, y) :- - has_spouse(a1, a2, a3, a4, x, a6, a7), - has_spouse(a2, a1, b3, b4, y, b6, b7) - weight = 1; - """ + weight = f + label = x; + """ + + // f_has_spouse_symmetry(x, y) :- + // has_spouse(a1, a2, a3, a4, x, a6), + // has_spouse(a2, a1, b3, b4, y, b6) + // weight = 1; + // """ val test4 = """ articles(article_id, text); sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id); @@ -489,6 +511,12 @@ object Test extends ConjunctiveQueryParser { val queries = extractionsWithIndex flatMap { case (e) => Some(extractionRule(schema, extractionMap, e._1, e._2)) } + + q.get flatMap { + case w : InferenceRule => 
Some(inferenceRule(schema, w)) + case _ => None + } + // println(extractionsWithIndex) // println(extracions) // val queries = q.get flatMap { From e86d99e8c2f8325f65c4d7514202c5e72746d72e Mon Sep 17 00:00:00 2001 From: senwu Date: Mon, 20 Apr 2015 16:16:54 -0700 Subject: [PATCH 009/347] fix issues to fit new syntax --- Test.scala | 145 ++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 111 insertions(+), 34 deletions(-) diff --git a/Test.scala b/Test.scala index 89aac39dd..7b317b2f0 100644 --- a/Test.scala +++ b/Test.scala @@ -71,7 +71,9 @@ case class KnownFactorWeight(value: Double) extends FactorWeight { case class UnknownFactorWeight(variables: List[String]) extends FactorWeight case class SchemaElement( a : Atom , query : Boolean ) extends Statement // atom and whether this is a query relation. -case class ExtractionRule(q : ConjunctiveQuery, udfs : Option[String]) extends Statement // Extraction rule +case class FunctionElement( functionName: String, input: String, output: String, implementation: String, mode: String) extends Statement +case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule +case class FunctionRule(input : String, output : String, function : String) extends Statement // Extraction rule case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight) extends Statement // Weighted rule @@ -79,7 +81,7 @@ case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight) extends S class ConjunctiveQueryParser extends JavaTokenParsers { // Odd definitions, but we'll keep them. def stringliteral1: Parser[String] = ("'"+"""([^'\p{Cntrl}\\]|\\[\\"'bfnrt]|\\u[a-fA-F0-9]{4})*"""+"'").r ^^ {case (x) => x} - def stringliteral2: Parser[String] = """[a-zA-Z_0-9\.]*""".r ^^ {case (x) => x} + def stringliteral2: Parser[String] = """[a-zA-Z_0-9\./]*""".r ^^ {case (x) => x} def stringliteral: Parser[String] = (stringliteral1 | stringliteral2) ^^ {case (x) => x} // relation names and columns are just strings. @@ -104,9 +106,19 @@ class ConjunctiveQueryParser extends JavaTokenParsers { case (a ~ Some(_)) => SchemaElement(a,false) } - def extractionRule : Parser[ExtractionRule] = query ~ "udf" ~ "=" ~ opt(udf) ^^ { - case (q ~ "udf" ~ "=" ~ Some(udfs)) => ExtractionRule(q,Some(udfs)) - case (q ~ "udf" ~ "=" ~ None) => ExtractionRule(q,None) + + def functionElement : Parser[FunctionElement] = "function" ~ stringliteral ~ "over like" ~ stringliteral ~ "returns like" ~ stringliteral ~ "implementation" ~ stringliteral ~ "handles" ~ stringliteral ~ "lines" ^^ { + case ("function" ~ a ~ "over like" ~ b ~ "returns like" ~ c ~ "implementation" ~ d ~ "handles" ~ e ~ "lines") => FunctionElement(a, b, c, d, e) + } + + + def extractionRule : Parser[ExtractionRule] = query ^^ { + case (q) => ExtractionRule(q) + // case (q ~ "udf" ~ "=" ~ None) => ExtractionRule(q,None) + } + + def functionRule : Parser[FunctionRule] = stringliteral ~ ":-" ~ "!" ~ stringliteral ~ "(" ~ stringliteral ~ ")" ^^ { + case (a ~ ":-" ~ "!" 
~ b ~ "(" ~ c ~ ")") => FunctionRule(c, a, b) } def constantWeight = "weight" ~> "=" ~> """-?[\d\.]+""".r ^^ { x => KnownFactorWeight(x.toDouble) } @@ -121,9 +133,9 @@ class ConjunctiveQueryParser extends JavaTokenParsers { } // rules or schema elements in aribitrary order - def statement : Parser[Statement] = (extractionRule | inferenceRule | schemaElement) ^^ {case(x) => x} + def statement : Parser[Statement] = (functionElement | extractionRule | functionRule | inferenceRule | schemaElement) ^^ {case(x) => x} - def statements : Parser[List[Statement]] = rep1sep(statement, ";") ^^ { case(x) => x } + def statements : Parser[List[Statement]] = rep1sep(statement, ".") ^^ { case(x) => x } } // This handles the schema statements. @@ -138,6 +150,8 @@ class StatementSchema( statements : List[Statement] ) { var ground_relations : Map[ String, Boolean ] = new HashMap[ String, Boolean ]() + var function_schema : Map[String, FunctionElement] = new HashMap[ String, FunctionElement]() + def init() = { // generate the statements. statements.foreach { @@ -147,8 +161,10 @@ class StatementSchema( statements : List[Statement] ) { schema += { (r,i) -> n } ground_relations += { r -> query } // record whether a query or a ground term. } - case ExtractionRule(_,_) => () + case ExtractionRule(_) => () case InferenceRule(_,_) => () + case FunctionElement(a, b, c, d, e) => function_schema += {a -> FunctionElement(a, b, c, d, e)} + case FunctionRule(_,_,_) => () } println(schema) println(ground_relations) @@ -169,6 +185,15 @@ class StatementSchema( statements : List[Statement] ) { } } + def resolveFunctionName( v : String ) : FunctionElement = { + if (function_schema contains v) { + function_schema(v) + } else { + return FunctionElement("0","0","0","0","0") + } + + } + // The default is query term. def isQueryTerm( relName : String ): Boolean = { if( ground_relations contains relName ) !ground_relations(relName) else true @@ -203,6 +228,8 @@ class QuerySchema(q : ConjunctiveQuery) { def getVar(varName : String ) : Variable = { query_schema(varName)._2 } } + + object Test extends ConjunctiveQueryParser { // This is generic code that generates the FROM with positional aliasing R0, R1, etc. 
@@ -294,14 +321,56 @@ object Test extends ConjunctiveQueryParser { dependencies ::= s""" "extraction_rule_${e._1}" """ } val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" + + val extractor = s""" + extraction_rule_${index} { + input : \"\"\" CREATE VIEW ${r.q.head.name} AS ${inputQuery} + \"\"\" + style : \"sql_extractor\" + ${dependencyStr} + } + """ + + // val extractor = s""" + // extraction_rule_${index} { + // input : \"\"\" CREATE VIEW ${r.q.head.name} AS ${inputQuery} + // \"\"\" + // output_relation : \"${r.q.head.name}\" + // udf : \"/udf/${r.udfs.get}.py\" + // style : \"tsv_extractor\" + // ${dependencyStr} + // } + // """ + println(extractor) + extractor + } + + def functionRule( ss: StatementSchema, dependencies: List[(Int, String)], r : FunctionRule, index : Int) : String = { + + val inputQuery = s""" + SELECT * FROM ${r.input} + """ + + val function = ss.resolveFunctionName(r.function) + + // val dependencyRelation = r.q.body map { case(x) => s"${x.name}"} + var dependency = List[String]() + for (d <- dependencies) { + if (r.input == d._2) { + dependency ::= s""" "extraction_rule_${d._1}" """ + } + } + val dependencyStr = if (dependency.length > 0) s"dependencies: [${dependency.mkString(", ")}]" else "" + + val extractor = s""" extraction_rule_${index} { - input : \"\"\" ${inputQuery} + input : \"\"\" SELECT * FROM ${r.input} \"\"\" - output_relation : \"${r.q.head.name}\" - udf : \"/udf/${r.udfs.get}.py\" - style : \"tsv_extractor\" + output_relation : \"${r.output}\" + udf : \"${function.implementation}\" + style : \"${function.mode}_extractor\" ${dependencyStr} } """ @@ -467,32 +536,40 @@ object Test extends ConjunctiveQueryParser { people_mentions(sentence_id, p2.start_position, p2.length, p2.text, person2_id) udf=ext_has_spouse_features; """ - val q = parse(statements, test3) + + val test5 = """ + ext_people_input( + sentence_id, + words, + ner_tags). + function ext_has_spouse_features over like ext_has_spouse_features_input + returns like has_spouse_features + implementation udf/ext_has_spouse_features.py handles tsv lines. + function ext_people over like ext_people_input + returns like people_mentions + implementation udf/ext_people.py handles tsv lines. + ext_people_input(sentence_id, words, ner_tags):- + sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id). + people_mentions :- + !ext_people(ext_people_input). + people_mentions_1 :- + !ext_people(people_mentions). 
+  """
+    val q = parse(statements, test5)
+    println(q)
     val schema = new StatementSchema( q.get )
     val variables = variableSchema(q.get, schema)
-    val extracions = q.get flatMap {
-      case _ : SchemaElement => None
-      case e : ExtractionRule => Some(e)
-      case w : InferenceRule => None
-    }
-    val extractionsWithIndex = extracions.zipWithIndex
-    val extractionMap = extractionsWithIndex map {
-      case (e) => (e._2, e._1.q.head.name)
+    var dependencies = q.get.zipWithIndex map {
+      case (e : ExtractionRule, i) => (i, e.q.head.name)
+      case (f : FunctionRule, i) => (i, f.output)
+      case (_,_) => (-1, "-1")
     }
-    // for (extractor <- extractionsWithIndex) {
-    //   extractionMap += (extractor.get(1), extractor.get(0).q.head)
-    // }
-
-    println(extractionMap)
-
-    val queries = extractionsWithIndex flatMap {
-      case (e) => Some(extractionRule(schema, extractionMap, e._1, e._2))
+    val queries = q.get.zipWithIndex flatMap {
+      case (e : ExtractionRule, i) => Some(extractionRule(schema, dependencies, e, i))
+      // case (w : InferenceRule, i) => None
+      case (f : FunctionRule, i) => Some(functionRule(schema, dependencies, f, i))
+      case (_,_) => None
     }
-    // println(extractionsWithIndex)
-    // println(extracions)
-    // val queries = q.get flatMap {
-    //   case _ : SchemaElement => None
-    //   case e : ExtractionRule => Some(extractionRule(schema, extractionMap, e))
   }
 }

From 8afbcf0fef7b9f046ec80c325a65e69b61d1b33b Mon Sep 17 00:00:00 2001
From: senwu
Date: Mon, 20 Apr 2015 18:09:09 -0700
Subject: [PATCH 010/347] add spouse example as test case -- need feiran to add
 inference rule

---
 Test.scala | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 84 insertions(+), 1 deletion(-)

diff --git a/Test.scala b/Test.scala
index 846c2fe0c..be3ef92d7 100644
--- a/Test.scala
+++ b/Test.scala
@@ -583,7 +583,90 @@ object Test extends ConjunctiveQueryParser {
     people_mentions_1 :-
       !ext_people(people_mentions).
""" - val q = parse(statements, test3) + + val test6 = """ + articles( + article_id, + text); + sentences( + document_id, + sentence, + words, + lemma, + pos_tags, + dependencies, + ner_tags, + sentence_offset, + sentence_id); + people_mentions( + sentence_id, + start_position, + length, + text, + mention_id); + has_spouse_candidates( + person1_id, + person2_id, + sentence_id, + description, + relation_id); + has_spouse_features( + relation_id, + feature); + + has_spouse(relation_id)!; + + people_mentions :- + !ext_people(ext_people_input); + ext_people_input( + sentence_id, + words, + ner_tags); + ext_people_input(s, words, ner_tags) :- + sentences(a, b, words, c, d, e, ner_tags, f, s); + function ext_people over like ext_people_input + returns like people_mentions + implementation udf/ext_people.py handles tsv lines; + + has_spouse_candidates :- + !ext_has_spouse(ext_has_spouse_input); + ext_has_spouse_input( + sentence_id, + p1_id, + p1_text, + p2_id, + p2_text); + ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + people_mentions(s, a, b, p1_text, p1_id), + people_mentions(s, c, d, p2_text, p2_id); + function ext_has_spouse over like ext_has_spouse_input + returns like has_spouse_candidates + implementation udf/ext_has_spouse.py handles tsv lines; + + has_spouse_features :- + !ext_has_spouse_features(ext_has_spouse_features_input); + ext_has_spouse_features_input( + words, + relation_id, + p1_start_position, + p1_length, + p2_start_position, + p2_length); + ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, i, rid), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id); + function ext_has_spouse_features over like ext_has_spouse_features_input + returns like has_spouse_features + implementation udf/ext_has_spouse_features.py handles tsv lines; + """ + // has_spouse(rid) :- + // has_spouse_candidates(a, b, c, d, rid), + // has_spouse_features(rid, f) + // weight = f; + // """ + val q = parse(statements, test6) println(q) val schema = new StatementSchema( q.get ) val variables = variableSchema(q.get, schema) From 0c2db4bd3e48b02d9001ab9d32997972b857c783 Mon Sep 17 00:00:00 2001 From: feiranwang Date: Mon, 20 Apr 2015 22:03:02 -0700 Subject: [PATCH 011/347] put node query into sql extractor --- Test.scala | 50 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/Test.scala b/Test.scala index 846c2fe0c..77d0bde98 100644 --- a/Test.scala +++ b/Test.scala @@ -265,7 +265,7 @@ object Test extends ConjunctiveQueryParser { ${ whereClauseStr }""" } // generate the node portion (V) of the factor graph - def nodeRule(ss : StatementSchema, qs: QuerySchema, z : InferenceRule) : String = { + def nodeRule(ss : StatementSchema, qs: QuerySchema, z : InferenceRule, dep: List[(Int, String)]) : String = { val headTerms = z.q.head.terms map { case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" } @@ -273,10 +273,31 @@ object Test extends ConjunctiveQueryParser { val name = ss.resolveName(qs.getVar(z.supervision)) val labelCol = s"R${index}.${name}" val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") - s"""CREATE TABLE ${ z.q.head.name } AS - SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label - ${ generateSQLBody(ss,z.q) } - """ + val query = s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label + ${ generateSQLBody(ss,z.q) }""" + + val 
dependencyRelation = z.q.body map { case(x) => s"${x.name}"} + var dependencies = List[String]() + for (e <- dep) { + if (dependencyRelation contains e._2) + dependencies ::= s""" "extraction_rule_${e._1}" """ + } + val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" + + + // s"""CREATE TABLE ${ z.q.head.name } AS + // SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label + // ${ generateSQLBody(ss,z.q) } + // """ + s""" + extraction_rule_${z.q.head.name} { + input : \"\"\" CREATE TABLE ${z.q.head.name} AS + ${query} + \"\"\" + style : \"sql_extractor\" + ${dependencyStr} + } + """ } // generate variable schema statements @@ -286,15 +307,7 @@ object Test extends ConjunctiveQueryParser { statements.foreach { case InferenceRule(q, weights, supervision) => val qs = new QuerySchema(q) - q.head.terms.foreach { - case Variable(n,r,i) => { - println(n) - val index = qs.getBodyIndex(n) - val name = ss.resolveName(qs.getVar(n)) - val relation = q.body(index).name - schema += s"${relation}.${name} : Boolean" - } - } + schema += s"${q.head.name}.label : Boolean" case _ => () } val ddSchema = schema.mkString("\n") @@ -303,7 +316,7 @@ object Test extends ConjunctiveQueryParser { } // Generate extraction rule part for deepdive - def extractionRule( ss: StatementSchema, em: List[(Int, String)], r : ExtractionRule, index : Int ) : String = { + def extractionRule( ss: StatementSchema, em: List[(Int, String)], r : ExtractionRule, index : Int) : String = { // Generate the body of the query. val qs = new QuerySchema( r.q ) // variable columns @@ -398,12 +411,12 @@ object Test extends ConjunctiveQueryParser { } // generate inference rule part for deepdive - def inferenceRule(ss : StatementSchema, r : InferenceRule) : String = { + def inferenceRule(ss : StatementSchema, r : InferenceRule, dep : List[(Int, String)]) : String = { println("==================") val qs = new QuerySchema( r.q ) // node query - val node_query = if (ss.isQueryTerm(r.q.head.name)) Some(nodeRule(ss,qs,r)) else None + val node_query = if (ss.isQueryTerm(r.q.head.name)) Some(nodeRule(ss,qs,r, dep)) else None println(node_query) // edge query @@ -591,11 +604,12 @@ object Test extends ConjunctiveQueryParser { var dependencies = q.get.zipWithIndex map { case (e : ExtractionRule, i) => (i, e.q.head.name) case (f : FunctionRule, i) => (i, f.output) + case (w : InferenceRule, i) => (i, w.q.head.name) case (_,_) => (-1, "-1") } val queries = q.get.zipWithIndex flatMap { case (e : ExtractionRule, i) => Some(extractionRule(schema, dependencies, e, i)) - case (w : InferenceRule, i) => Some(inferenceRule(schema, w)) + case (w : InferenceRule, i) => Some(inferenceRule(schema, w, dependencies)) case (f : FunctionRule, i) => Some(functionRule(schema, dependencies, f, i)) case (_,_) => None } From a95f053ea61128a3d6afcf802865a6c98b95b27e Mon Sep 17 00:00:00 2001 From: feiranwang Date: Mon, 20 Apr 2015 22:07:09 -0700 Subject: [PATCH 012/347] add inference rule to example --- Test.scala | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/Test.scala b/Test.scala index 5177c4c4e..1289fee3d 100644 --- a/Test.scala +++ b/Test.scala @@ -622,7 +622,8 @@ object Test extends ConjunctiveQueryParser { person2_id, sentence_id, description, - relation_id); + relation_id, + is_correct); has_spouse_features( relation_id, feature); @@ -673,6 +674,12 @@ object Test extends ConjunctiveQueryParser { function ext_has_spouse_features over like ext_has_spouse_features_input returns like 
has_spouse_features implementation udf/ext_has_spouse_features.py handles tsv lines; + + has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), + has_spouse_features(rid, f) + weight = f + label = l; """ // has_spouse(rid) :- // has_spouse_candidates(a, b, c, d, rid), From fadab97376321122c0260e4ac49d37b9ce9d5127 Mon Sep 17 00:00:00 2001 From: feiranwang Date: Mon, 20 Apr 2015 22:11:11 -0700 Subject: [PATCH 013/347] small fix --- Test.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Test.scala b/Test.scala index 1289fee3d..3330518c7 100644 --- a/Test.scala +++ b/Test.scala @@ -668,7 +668,7 @@ object Test extends ConjunctiveQueryParser { p2_length); ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, i, rid), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id); function ext_has_spouse_features over like ext_has_spouse_features_input From bdb2a8809dea58de56ba98562344b68797a1777b Mon Sep 17 00:00:00 2001 From: feiranwang Date: Tue, 21 Apr 2015 14:25:45 -0700 Subject: [PATCH 014/347] generate full application.conf --- Test.scala | 45 ++++++++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/Test.scala b/Test.scala index 3330518c7..9c44f81bf 100644 --- a/Test.scala +++ b/Test.scala @@ -169,8 +169,8 @@ class StatementSchema( statements : List[Statement] ) { case FunctionElement(a, b, c, d, e) => function_schema += {a -> FunctionElement(a, b, c, d, e)} case FunctionRule(_,_,_) => () } - println(schema) - println(ground_relations) + // println(schema) + // println(ground_relations) } init() @@ -284,13 +284,8 @@ object Test extends ConjunctiveQueryParser { } val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" - - // s"""CREATE TABLE ${ z.q.head.name } AS - // SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label - // ${ generateSQLBody(ss,z.q) } - // """ - s""" - extraction_rule_${z.q.head.name} { + val ext = s""" + extraction.extractors.extraction_rule_${z.q.head.name} { input : \"\"\" CREATE TABLE ${z.q.head.name} AS ${query} \"\"\" @@ -298,6 +293,8 @@ object Test extends ConjunctiveQueryParser { ${dependencyStr} } """ + println(ext) + ext } // generate variable schema statements @@ -310,7 +307,11 @@ object Test extends ConjunctiveQueryParser { schema += s"${q.head.name}.label : Boolean" case _ => () } - val ddSchema = schema.mkString("\n") + val ddSchema = s""" + deepdive.schema.variables { + ${schema.mkString("\n")} + } + """ println(ddSchema) ddSchema } @@ -328,7 +329,6 @@ object Test extends ConjunctiveQueryParser { val selectStr = (List(variableColsStr) flatMap (u => u)).mkString(", ") - // println(s"${selectStr}") val inputQuery = s""" SELECT ${selectStr} ${ generateSQLBody(ss, r.q) }""" @@ -342,7 +342,7 @@ object Test extends ConjunctiveQueryParser { val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" val extractor = s""" - extraction_rule_${index} { + extraction.extractors.extraction_rule_${index} { input : \"\"\" CREATE VIEW ${r.q.head.name} AS ${inputQuery} \"\"\" style : \"sql_extractor\" @@ -384,7 +384,7 @@ object Test extends ConjunctiveQueryParser { val extractor = s""" - extraction_rule_${index} { + extraction.extractors.extraction_rule_${index} { input : \"\"\" SELECT 
* FROM ${r.input} \"\"\" output_relation : \"${r.output}\" @@ -412,16 +412,13 @@ object Test extends ConjunctiveQueryParser { // generate inference rule part for deepdive def inferenceRule(ss : StatementSchema, r : InferenceRule, dep : List[(Int, String)]) : String = { - println("==================") val qs = new QuerySchema( r.q ) // node query val node_query = if (ss.isQueryTerm(r.q.head.name)) Some(nodeRule(ss,qs,r, dep)) else None - println(node_query) // edge query val fakeBody = r.q.head +: r.q.body - println(fakeBody) val fakeCQ = ConjunctiveQuery(r.q.head, fakeBody) // we will just use the fakeBody below. // Generate the body of the query. @@ -474,7 +471,7 @@ object Test extends ConjunctiveQueryParser { } val rule = s""" - factor_${r.q.head.name} { + inference.factors.factor_${r.q.head.name} { input_query: \"\"\"${inputQuery}\"\"\" function: "${func}" weight: "${weight}" @@ -484,6 +481,18 @@ object Test extends ConjunctiveQueryParser { return inputQuery } + + def dbSettings() : String = """ + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + } + """ /* T(base_attr); S(a1,a2) @@ -687,10 +696,8 @@ object Test extends ConjunctiveQueryParser { // weight = f; // """ val q = parse(statements, test6) - println(q) val schema = new StatementSchema( q.get ) val variables = variableSchema(q.get, schema) - println(variables) var dependencies = q.get.zipWithIndex map { case (e : ExtractionRule, i) => (i, e.q.head.name) case (f : FunctionRule, i) => (i, f.output) From b9289eab1176c867b348a5e7d21e7c9d6832ae5a Mon Sep 17 00:00:00 2001 From: feiranwang Date: Tue, 21 Apr 2015 16:31:32 -0700 Subject: [PATCH 015/347] minor change --- Test.scala | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/Test.scala b/Test.scala index 9c44f81bf..f23adcf72 100644 --- a/Test.scala +++ b/Test.scala @@ -108,7 +108,8 @@ class ConjunctiveQueryParser extends JavaTokenParsers { } - def functionElement : Parser[FunctionElement] = "function" ~ stringliteral ~ "over like" ~ stringliteral ~ "returns like" ~ stringliteral ~ "implementation" ~ stringliteral ~ "handles" ~ stringliteral ~ "lines" ^^ { + def functionElement : Parser[FunctionElement] = "function" ~ stringliteral + ~ "over like" ~ stringliteral ~ "returns like" ~ stringliteral ~ "implementation" ~ stringliteral ~ "handles" ~ stringliteral ~ "lines" ^^ { case ("function" ~ a ~ "over like" ~ b ~ "returns like" ~ c ~ "implementation" ~ d ~ "handles" ~ e ~ "lines") => FunctionElement(a, b, c, d, e) } @@ -286,10 +287,10 @@ object Test extends ConjunctiveQueryParser { val ext = s""" extraction.extractors.extraction_rule_${z.q.head.name} { - input : \"\"\" CREATE TABLE ${z.q.head.name} AS + input: \"\"\" CREATE TABLE ${z.q.head.name} AS ${query} \"\"\" - style : \"sql_extractor\" + style: "sql_extractor" ${dependencyStr} } """ @@ -304,7 +305,7 @@ object Test extends ConjunctiveQueryParser { statements.foreach { case InferenceRule(q, weights, supervision) => val qs = new QuerySchema(q) - schema += s"${q.head.name}.label : Boolean" + schema += s"${q.head.name}.label: Boolean" case _ => () } val ddSchema = s""" @@ -343,9 +344,9 @@ object Test extends ConjunctiveQueryParser { val extractor = s""" extraction.extractors.extraction_rule_${index} { - input : \"\"\" CREATE VIEW ${r.q.head.name} AS ${inputQuery} + input: \"\"\" CREATE VIEW ${r.q.head.name} AS ${inputQuery} 
\"\"\" - style : \"sql_extractor\" + style: \"sql_extractor\" ${dependencyStr} } """ @@ -385,11 +386,11 @@ object Test extends ConjunctiveQueryParser { val extractor = s""" extraction.extractors.extraction_rule_${index} { - input : \"\"\" SELECT * FROM ${r.input} + input: \"\"\" SELECT * FROM ${r.input} \"\"\" - output_relation : \"${r.output}\" - udf : \"${function.implementation}\" - style : \"${function.mode}_extractor\" + output_relation: \"${r.output}\" + udf: \"${function.implementation}\" + style: \"${function.mode}_extractor\" ${dependencyStr} } """ @@ -695,6 +696,7 @@ object Test extends ConjunctiveQueryParser { // has_spouse_features(rid, f) // weight = f; // """ + println(dbSettings()) val q = parse(statements, test6) val schema = new StatementSchema( q.get ) val variables = variableSchema(q.get, schema) From 3cf3506ea8e04fae3f2454bfd3e5bf506a6f99fd Mon Sep 17 00:00:00 2001 From: feiranwang Date: Tue, 21 Apr 2015 16:38:31 -0700 Subject: [PATCH 016/347] small fix --- Test.scala | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/Test.scala b/Test.scala index f23adcf72..73b50ed7d 100644 --- a/Test.scala +++ b/Test.scala @@ -108,8 +108,7 @@ class ConjunctiveQueryParser extends JavaTokenParsers { } - def functionElement : Parser[FunctionElement] = "function" ~ stringliteral - ~ "over like" ~ stringliteral ~ "returns like" ~ stringliteral ~ "implementation" ~ stringliteral ~ "handles" ~ stringliteral ~ "lines" ^^ { + def functionElement : Parser[FunctionElement] = "function" ~ stringliteral ~ "over like" ~ stringliteral ~ "returns like" ~ stringliteral ~ "implementation" ~ stringliteral ~ "handles" ~ stringliteral ~ "lines" ^^ { case ("function" ~ a ~ "over like" ~ b ~ "returns like" ~ c ~ "implementation" ~ d ~ "handles" ~ e ~ "lines") => FunctionElement(a, b, c, d, e) } @@ -286,7 +285,7 @@ object Test extends ConjunctiveQueryParser { val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" val ext = s""" - extraction.extractors.extraction_rule_${z.q.head.name} { + deepdive.extraction.extractors.extraction_rule_${z.q.head.name} { input: \"\"\" CREATE TABLE ${z.q.head.name} AS ${query} \"\"\" @@ -343,7 +342,7 @@ object Test extends ConjunctiveQueryParser { val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" val extractor = s""" - extraction.extractors.extraction_rule_${index} { + deepdive.extraction.extractors.extraction_rule_${index} { input: \"\"\" CREATE VIEW ${r.q.head.name} AS ${inputQuery} \"\"\" style: \"sql_extractor\" @@ -385,7 +384,7 @@ object Test extends ConjunctiveQueryParser { val extractor = s""" - extraction.extractors.extraction_rule_${index} { + deepdive.extraction.extractors.extraction_rule_${index} { input: \"\"\" SELECT * FROM ${r.input} \"\"\" output_relation: \"${r.output}\" @@ -472,7 +471,7 @@ object Test extends ConjunctiveQueryParser { } val rule = s""" - inference.factors.factor_${r.q.head.name} { + deepdive.inference.factors.factor_${r.q.head.name} { input_query: \"\"\"${inputQuery}\"\"\" function: "${func}" weight: "${weight}" From 50b9067b99725061d6a0f0702151887dbfe13026 Mon Sep 17 00:00:00 2001 From: feiranwang Date: Tue, 21 Apr 2015 17:48:14 -0700 Subject: [PATCH 017/347] fix inference rule alias --- Test.scala | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/Test.scala b/Test.scala index 73b50ed7d..141a0ba35 100644 --- a/Test.scala +++ b/Test.scala @@ -286,7 +286,8 @@ object Test 
extends ConjunctiveQueryParser { val ext = s""" deepdive.extraction.extractors.extraction_rule_${z.q.head.name} { - input: \"\"\" CREATE TABLE ${z.q.head.name} AS + sql: \"\"\" DROP TABLE IF EXISTS ${z.q.head.name}; + CREATE TABLE ${z.q.head.name} AS ${query} \"\"\" style: "sql_extractor" @@ -343,9 +344,10 @@ object Test extends ConjunctiveQueryParser { val extractor = s""" deepdive.extraction.extractors.extraction_rule_${index} { - input: \"\"\" CREATE VIEW ${r.q.head.name} AS ${inputQuery} + sql: \"\"\" DROP VIEW IF EXISTS ${r.q.head.name}; + CREATE VIEW ${r.q.head.name} AS ${inputQuery} \"\"\" - style: \"sql_extractor\" + style: "sql_extractor" ${dependencyStr} } """ @@ -436,14 +438,15 @@ object Test extends ConjunctiveQueryParser { // case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r.q, true) // } // val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None - + val index = r.q.body.length + 1 + val qs2 = new QuerySchema( fakeCQ ) val variableIdsStr = Some(s"""R0.id AS "${r.q.head.name}.R0.id" """) val variableColsStr = Some(s"""R0.label AS "${r.q.head.name}.R0.label" """) // weight string val uwStr = r.weights match { case KnownFactorWeight(x) => None - case UnknownFactorWeight(w) => Some(w.flatMap(s => resolveColumn(s, ss, qs, r.q, true)).mkString(", ")) + case UnknownFactorWeight(w) => Some(w.flatMap(s => resolveColumn(s, ss, qs2, fakeCQ, true)).mkString(", ")) } val selectStr = (List(variableIdsStr, variableColsStr, uwStr) flatMap (u => u)).mkString(", ") @@ -466,7 +469,7 @@ object Test extends ConjunctiveQueryParser { val weight = r.weights match { case KnownFactorWeight(x) => s"${x}" case UnknownFactorWeight(w) => { - s"""?(${w.flatMap(s => resolveColumn(s, ss, qs, r.q, false)).mkString(", ")})""" + s"""?(${w.flatMap(s => resolveColumn(s, ss, qs2, fakeCQ, false)).mkString(", ")})""" } } @@ -649,7 +652,7 @@ object Test extends ConjunctiveQueryParser { sentences(a, b, words, c, d, e, ner_tags, f, s); function ext_people over like ext_people_input returns like people_mentions - implementation udf/ext_people.py handles tsv lines; + implementation /Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py handles tsv lines; has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input); @@ -664,7 +667,7 @@ object Test extends ConjunctiveQueryParser { people_mentions(s, c, d, p2_text, p2_id); function ext_has_spouse over like ext_has_spouse_input returns like has_spouse_candidates - implementation udf/ext_has_spouse.py handles tsv lines; + implementation /Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py handles tsv lines; has_spouse_features :- !ext_has_spouse_features(ext_has_spouse_features_input); @@ -682,7 +685,7 @@ object Test extends ConjunctiveQueryParser { people_mentions(s, p2idx, p2len, l, person2_id); function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features - implementation udf/ext_has_spouse_features.py handles tsv lines; + implementation /Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py handles tsv lines; has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), From 9247777f645d98dcd6a9e72c39a69804bcbe6855 Mon Sep 17 00:00:00 2001 From: feiranwang Date: Tue, 21 Apr 2015 21:23:52 -0700 Subject: [PATCH 018/347] small fix --- Test.scala | 37 ------------------------------------- 1 file changed, 37 deletions(-) diff --git a/Test.scala b/Test.scala index 141a0ba35..309858285 100644 --- 
a/Test.scala +++ b/Test.scala @@ -351,17 +351,6 @@ object Test extends ConjunctiveQueryParser { ${dependencyStr} } """ - - // val extractor = s""" - // extraction_rule_${index} { - // input : \"\"\" CREATE VIEW ${r.q.head.name} AS ${inputQuery} - // \"\"\" - // output_relation : \"${r.q.head.name}\" - // udf : \"/udf/${r.udfs.get}.py\" - // style : \"tsv_extractor\" - // ${dependencyStr} - // } - // """ println(extractor) extractor } @@ -423,21 +412,6 @@ object Test extends ConjunctiveQueryParser { val fakeBody = r.q.head +: r.q.body val fakeCQ = ConjunctiveQuery(r.q.head, fakeBody) // we will just use the fakeBody below. - // Generate the body of the query. - // check if relName is a ground term, if so skip it. - // if not, generate the id column. - // val variableIds = r.q.body.zipWithIndex flatMap { - // case (Atom(r,_),i) => - // if(ss.isQueryTerm(r)) Some(s"""R${i}.id AS "${r}.R${i}.id" """) else None - // } // we know have all variables in the body - - // val variableIdsStr = if (variableIds.length > 0) Some(variableIds.mkString(", ")) else None - - // variable columns - // val variableCols = r.q.head.terms flatMap { - // case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r.q, true) - // } - // val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None val index = r.q.body.length + 1 val qs2 = new QuerySchema( fakeCQ ) val variableIdsStr = Some(s"""R0.id AS "${r.q.head.name}.R0.id" """) @@ -456,12 +430,6 @@ object Test extends ConjunctiveQueryParser { SELECT ${selectStr} ${ generateSQLBody(ss, fakeCQ) }""" - // variable columns using alias (for factor function) - // val variableColsAlias = r.q.head.terms flatMap { - // case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r.q, false) - // } - // val variableColsAliasStr = if (variableColsAlias.length > 0) Some(variableColsAlias.mkString(", ")) else None - // factor function val func = s"""Imply(${r.q.head.name}.R0.label)""" @@ -693,11 +661,6 @@ object Test extends ConjunctiveQueryParser { weight = f label = l; """ - // has_spouse(rid) :- - // has_spouse_candidates(a, b, c, d, rid), - // has_spouse_features(rid, f) - // weight = f; - // """ println(dbSettings()) val q = parse(statements, test6) val schema = new StatementSchema( q.get ) From f9531e21426d3fb56ad737c986a026499114a29d Mon Sep 17 00:00:00 2001 From: feiranwang Date: Tue, 21 Apr 2015 21:38:26 -0700 Subject: [PATCH 019/347] use . as delimiter, quote function implementation --- Test.scala | 60 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 24 deletions(-) diff --git a/Test.scala b/Test.scala index 309858285..5aa2756ce 100644 --- a/Test.scala +++ b/Test.scala @@ -83,7 +83,8 @@ class ConjunctiveQueryParser extends JavaTokenParsers { // def stringliteral1: Parser[String] = ("'"+"""([^'\p{Cntrl}\\]|\\[\\"'bfnrt]|\\u[a-fA-F0-9]{4})*"""+"'").r ^^ {case (x) => x} // def stringliteral2: Parser[String] = """[a-zA-Z_0-9\./]*""".r ^^ {case (x) => x} // def stringliteral: Parser[String] = (stringliteral1 | stringliteral2) ^^ {case (x) => x} - def stringliteral: Parser[String] = """[a-zA-Z0-9\./_]+""".r + def stringliteral: Parser[String] = """[a-zA-Z0-9_]+""".r + def path: Parser[String] = """[a-zA-Z0-9\./_]+""".r // relation names and columns are just strings. 
def relation_name: Parser[String] = stringliteral ^^ {case (x) => x} @@ -102,14 +103,17 @@ class ConjunctiveQueryParser extends JavaTokenParsers { case (headatom ~ ":-" ~ bodyatoms) => ConjunctiveQuery(headatom, bodyatoms.toList) } - def schemaElement : Parser[SchemaElement] = atom ~ opt("!") ^^ { + def schemaElement : Parser[SchemaElement] = atom ~ opt("?") ^^ { case (a ~ None) => SchemaElement(a,true) case (a ~ Some(_)) => SchemaElement(a,false) } - def functionElement : Parser[FunctionElement] = "function" ~ stringliteral ~ "over like" ~ stringliteral ~ "returns like" ~ stringliteral ~ "implementation" ~ stringliteral ~ "handles" ~ stringliteral ~ "lines" ^^ { - case ("function" ~ a ~ "over like" ~ b ~ "returns like" ~ c ~ "implementation" ~ d ~ "handles" ~ e ~ "lines") => FunctionElement(a, b, c, d, e) + def functionElement : Parser[FunctionElement] = "function" ~ stringliteral ~ + "over like" ~ stringliteral ~ "returns like" ~ stringliteral ~ "implementation" ~ + "\"" ~ path ~ "\"" ~ "handles" ~ stringliteral ~ "lines" ^^ { + case ("function" ~ a ~ "over like" ~ b ~ "returns like" ~ c ~ "implementation" ~ + "\"" ~ d ~ "\"" ~ "handles" ~ e ~ "lines") => FunctionElement(a, b, c, d, e) } @@ -138,7 +142,7 @@ class ConjunctiveQueryParser extends JavaTokenParsers { // rules or schema elements in aribitrary order def statement : Parser[Statement] = (functionElement | inferenceRule | extractionRule | functionRule | schemaElement) ^^ {case(x) => x} - def statements : Parser[List[Statement]] = rep1sep(statement, ";") ^^ { case(x) => x } + def statements : Parser[List[Statement]] = rep1sep(statement, ".") ^^ { case(x) => x } } // This handles the schema statements. @@ -580,7 +584,7 @@ object Test extends ConjunctiveQueryParser { val test6 = """ articles( article_id, - text); + text). sentences( document_id, sentence, @@ -590,76 +594,84 @@ object Test extends ConjunctiveQueryParser { dependencies, ner_tags, sentence_offset, - sentence_id); + sentence_id). people_mentions( sentence_id, start_position, length, text, - mention_id); + mention_id). has_spouse_candidates( person1_id, person2_id, sentence_id, description, relation_id, - is_correct); + is_correct). has_spouse_features( relation_id, - feature); + feature). - has_spouse(relation_id)!; + has_spouse(relation_id)?. people_mentions :- - !ext_people(ext_people_input); + !ext_people(ext_people_input). ext_people_input( sentence_id, words, - ner_tags); + ner_tags). + ext_people_input(s, words, ner_tags) :- - sentences(a, b, words, c, d, e, ner_tags, f, s); + sentences(a, b, words, c, d, e, ner_tags, f, s). + function ext_people over like ext_people_input returns like people_mentions - implementation /Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py handles tsv lines; + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. has_spouse_candidates :- - !ext_has_spouse(ext_has_spouse_input); + !ext_has_spouse(ext_has_spouse_input). + ext_has_spouse_input( sentence_id, p1_id, p1_text, p2_id, - p2_text); + p2_text). + ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- people_mentions(s, a, b, p1_text, p1_id), - people_mentions(s, c, d, p2_text, p2_id); + people_mentions(s, c, d, p2_text, p2_id). 
+ function ext_has_spouse over like ext_has_spouse_input returns like has_spouse_candidates - implementation /Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py handles tsv lines; + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. has_spouse_features :- - !ext_has_spouse_features(ext_has_spouse_features_input); + !ext_has_spouse_features(ext_has_spouse_features_input). + ext_has_spouse_features_input( words, relation_id, p1_start_position, p1_length, p2_start_position, - p2_length); + p2_length). + ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), - people_mentions(s, p2idx, p2len, l, person2_id); + people_mentions(s, p2idx, p2len, l, person2_id). + function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features - implementation /Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py handles tsv lines; + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f - label = l; + label = l. """ println(dbSettings()) val q = parse(statements, test6) From 78f40720217ae0a5e8eaa1252ac70f50e2312e82 Mon Sep 17 00:00:00 2001 From: senwu Date: Tue, 21 Apr 2015 23:15:23 -0700 Subject: [PATCH 020/347] add attribute type in schema --- Test.scala | 105 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 60 insertions(+), 45 deletions(-) diff --git a/Test.scala b/Test.scala index 5aa2756ce..2696147a7 100644 --- a/Test.scala +++ b/Test.scala @@ -60,17 +60,21 @@ Consider trait Statement case class Variable(varName : String, relName : String, index : Int ) case class Atom(name : String, terms : List[Variable]) +case class Attribute(name : String, terms : List[Variable], types : List[String]) case class ConjunctiveQuery(head: Atom, body: List[Atom]) +case class Column(name : String, t : String) sealed trait FactorWeight { def variables : List[String] } + case class KnownFactorWeight(value: Double) extends FactorWeight { def variables = Nil } + case class UnknownFactorWeight(variables: List[String]) extends FactorWeight -case class SchemaElement( a : Atom , query : Boolean ) extends Statement // atom and whether this is a query relation. +case class SchemaElement( a : Attribute , query : Boolean ) extends Statement // atom and whether this is a query relation. 
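// For illustration (a sketch, not part of the diff; the relation and column
// names come from test6): with the attr/attribute productions below, a typed
// declaration such as
//
//   people_mentions(sentence_id text, start_position int)
//
// parses each "name type" pair into a Column and produces
//
//   SchemaElement(
//     Attribute("people_mentions",
//       List(Variable("sentence_id", "people_mentions", 0),
//            Variable("start_position", "people_mentions", 1)),
//       List("text", "int")),
//     true)  // true: no trailing "?", i.e. not declared as a query relation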
case class FunctionElement( functionName: String, input: String, output: String, implementation: String, mode: String) extends Statement case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule case class FunctionRule(input : String, output : String, function : String) extends Statement // Extraction rule @@ -83,12 +87,15 @@ class ConjunctiveQueryParser extends JavaTokenParsers { // def stringliteral1: Parser[String] = ("'"+"""([^'\p{Cntrl}\\]|\\[\\"'bfnrt]|\\u[a-fA-F0-9]{4})*"""+"'").r ^^ {case (x) => x} // def stringliteral2: Parser[String] = """[a-zA-Z_0-9\./]*""".r ^^ {case (x) => x} // def stringliteral: Parser[String] = (stringliteral1 | stringliteral2) ^^ {case (x) => x} - def stringliteral: Parser[String] = """[a-zA-Z0-9_]+""".r + def stringliteral: Parser[String] = """[a-zA-Z0-9_\[\]]+""".r def path: Parser[String] = """[a-zA-Z0-9\./_]+""".r // relation names and columns are just strings. def relation_name: Parser[String] = stringliteral ^^ {case (x) => x} def col : Parser[String] = stringliteral ^^ { case(x) => x } + def attr : Parser[Column] = stringliteral ~ stringliteral ^^ { + case(x ~ y) => Column(x, y) + } def atom: Parser[Atom] = relation_name ~ "(" ~ rep1sep(col, ",") ~ ")" ^^ { case (r ~ "(" ~ cols ~ ")") => { @@ -97,13 +104,21 @@ class ConjunctiveQueryParser extends JavaTokenParsers { } } + def attribute: Parser[Attribute] = relation_name ~ "(" ~ rep1sep(attr, ",") ~ ")" ^^ { + case (r ~ "(" ~ attrs ~ ")") => { + val vars = attrs.zipWithIndex map { case(x, i) => Variable(x.name, r, i) } + var types = attrs map { case(x) => x.t } + Attribute(r,vars, types) + } + } + def udf : Parser[String] = stringliteral ^^ {case (x) => x} def query : Parser[ConjunctiveQuery] = atom ~ ":-" ~ rep1sep(atom, ",") ^^ { case (headatom ~ ":-" ~ bodyatoms) => ConjunctiveQuery(headatom, bodyatoms.toList) } - def schemaElement : Parser[SchemaElement] = atom ~ opt("?") ^^ { + def schemaElement : Parser[SchemaElement] = attribute ~ opt("?") ^^ { case (a ~ None) => SchemaElement(a,true) case (a ~ Some(_)) => SchemaElement(a,false) } @@ -116,7 +131,6 @@ class ConjunctiveQueryParser extends JavaTokenParsers { "\"" ~ d ~ "\"" ~ "handles" ~ e ~ "lines") => FunctionElement(a, b, c, d, e) } - def extractionRule : Parser[ExtractionRule] = query ^^ { case (q) => ExtractionRule(q) // case (q ~ "udf" ~ "=" ~ None) => ExtractionRule(q,None) @@ -162,7 +176,7 @@ class StatementSchema( statements : List[Statement] ) { def init() = { // generate the statements. statements.foreach { - case SchemaElement(Atom(r, terms),query) => + case SchemaElement(Attribute(r, terms, types),query) => terms.foreach { case Variable(n,r,i) => schema += { (r,i) -> n } @@ -583,43 +597,44 @@ object Test extends ConjunctiveQueryParser { val test6 = """ articles( - article_id, - text). + article_id text, + text text). sentences( - document_id, - sentence, - words, - lemma, - pos_tags, - dependencies, - ner_tags, - sentence_offset, - sentence_id). + document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text). people_mentions( - sentence_id, - start_position, - length, - text, - mention_id). + sentence_id text, + start_position int, + length int, + text text, + mention_id text). + has_spouse_candidates( - person1_id, - person2_id, - sentence_id, - description, - relation_id, - is_correct). + person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text). 
has_spouse_features( - relation_id, - feature). - - has_spouse(relation_id)?. + relation_id text, + feature text). + has_spouse(relation_id text)?. + people_mentions :- !ext_people(ext_people_input). + ext_people_input( - sentence_id, - words, - ner_tags). + sentence_id text, + words text[], + ner_tags text[]). ext_people_input(s, words, ner_tags) :- sentences(a, b, words, c, d, e, ner_tags, f, s). @@ -632,11 +647,11 @@ object Test extends ConjunctiveQueryParser { !ext_has_spouse(ext_has_spouse_input). ext_has_spouse_input( - sentence_id, - p1_id, - p1_text, - p2_id, - p2_text). + sentence_id text, + p1_id text, + p1_text text, + p2_id text, + p2_text text). ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- people_mentions(s, a, b, p1_text, p1_id), @@ -650,12 +665,12 @@ object Test extends ConjunctiveQueryParser { !ext_has_spouse_features(ext_has_spouse_features_input). ext_has_spouse_features_input( - words, - relation_id, - p1_start_position, - p1_length, - p2_start_position, - p2_length). + words text[], + relation_id text, + p1_start_position int, + p1_length int, + p2_start_position int, + p2_length int). ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), From eefd47d99296c6d36d5ac2d82bd8f25054da74ed Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Wed, 22 Apr 2015 23:25:41 -0700 Subject: [PATCH 021/347] Renames to DeepDiveLogCompiler.scala Uses sbt instead of scalac/scala --- .gitignore | 2 +- ConjunctiveQueryParser.scala | 342 ------------------------ Test.scala => DeepDiveLogCompiler.scala | 2 +- run.sh | 8 +- 4 files changed, 3 insertions(+), 351 deletions(-) delete mode 100644 ConjunctiveQueryParser.scala rename Test.scala => DeepDiveLogCompiler.scala (99%) diff --git a/.gitignore b/.gitignore index 6b468b62a..ea8c4bf7f 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -*.class +/target diff --git a/ConjunctiveQueryParser.scala b/ConjunctiveQueryParser.scala deleted file mode 100644 index ea6f5bcfe..000000000 --- a/ConjunctiveQueryParser.scala +++ /dev/null @@ -1,342 +0,0 @@ -import scala.util.parsing.combinator._ -import scala.collection.immutable.HashMap -/* - This file parses an extended form of datalog like sugar. - - It allows schema declarations - - SomeOther(realname, otherattribute) - - And queries - - Q(x,y) :- R(x,y), SomeOther(y, z) - - Using the schema can SQLized as - - SELECT R1.x,R2.y - FROM R as R1,SomeOther as R2 - WHERE R1.y = R2.realname - - We translate by introducing aliases R1, R2 , etc. to deal with - repeated symbols. - - TODO: - ================= - - Our schema needs to know whether a symbol is this a query table (and - so should contain an _id) field or is a regular table from the - user. - - If a head term is not mentioned in the schema, its assumed it is a - query table that this code must create. - - If one wants to explicilty mention a query table in the schema, they - do so with a trailing exclamation point as follows - - Q(x,y)!; - -Consider - - Q(x) :- R(x,f) weight=f - - ... R is likely *not* a variable table ... we record its translation below. - - In contrast, Q(x) :- R(x),S(x) ... coule be treated as variable tables. Hence, the schema has: - - R(x,f) // regular table - R(x,f)! // variable table. - - */ - -/* TODOs: - - Refactor schema object and introduce error checking (unsafe queries, - unordered attributes, etc.). -*/ - -// *************************************** -// * The union types for for the parser. 
* -// *************************************** -trait Statement -case class Variable(varName : String, relName: String, index : Int ) -case class Atom(name : String, terms : List[Variable]) -case class ConjunctiveQuery(head: Atom, body: List[Atom]) - -case class WeightedRule(q : ConjunctiveQuery, weights : Option[List[String]]) extends Statement // Weighted rule -case class SchemaElement( a : Atom , query : Boolean ) extends Statement // atom and whether this is a query relation. - - -// Parser -class ConjunctiveQueryParser extends JavaTokenParsers { - // Odd definitions, but we'll keep them. - def stringliteral1: Parser[String] = ("'"+"""([^'\p{Cntrl}\\]|\\[\\"'bfnrt]|\\u[a-fA-F0-9]{4})*"""+"'").r ^^ {case (x) => x} - def stringliteral2: Parser[String] = """[a-zA-Z_0-9\.]*""".r ^^ {case (x) => x} - def stringliteral: Parser[String] = (stringliteral1 | stringliteral2) ^^ {case (x) => x} - - // relation names and columns are just strings. - def relation_name: Parser[String] = stringliteral ^^ {case (x) => x} - def col : Parser[String] = stringliteral ^^ { case(x) => x } - - def atom: Parser[Atom] = relation_name ~ "(" ~ rep1sep(col, ",") ~ ")" ^^ { - case (r ~ "(" ~ cols ~ ")") => { - val vars = cols.zipWithIndex map { case(name,i) => Variable(name, r, i) } - Atom(r,vars) - } - } - - def query : Parser[ConjunctiveQuery] = atom ~ ":-" ~ rep1sep(atom, ",") ^^ { - case (headatom ~ ":-" ~ bodyatoms) => ConjunctiveQuery(headatom, bodyatoms.toList) - } - - def schema_element : Parser[SchemaElement] = atom ~ opt("!") ^^ { - case (a ~ None) => SchemaElement(a,true) - case (a ~ Some(_)) => SchemaElement(a,false) - } - - - def rule : Parser[WeightedRule] = query ~ opt( "weight=" ~ rep1sep(col, ",")) ^^ { - case (q ~ Some("weight=" ~ weights)) => WeightedRule(q,Some(weights)) - case (q ~ None) => WeightedRule(q,None) - } - - // rules or schema elements in aribitrary order - def statement : Parser[Statement] = (rule | schema_element) ^^ {case(x) => x} - - def statements : Parser[List[Statement]] = rep1sep(statement, ";") ^^ { case(x) => x } -} - - -// This handles the schema statements. -// It can tell you if a predicate is a "query" predicate or a "ground prediate" -// and it resolves Variables their correct and true name in the schema, i.e. R(x,y) then x could be Attribute1 declared. -class StatementSchema( statements : List[Statement] ) { - // TODO: refactor the schema into a class that constructs and - // manages these maps. Also it should have appropriate - // abstractions and error handling for missing values. - // ** Start refactor. - var schema : Map[ Tuple2[String,Int], String ] = new HashMap[ Tuple2[String,Int], String ]() - - var ground_relations : Map[ String, Boolean ] = new HashMap[ String, Boolean ]() - - def init() = { - // generate the statements. - statements.foreach { - case SchemaElement(Atom(r, terms),query) => - terms.foreach { - case Variable(n,r,i) => - schema += { (r,i) -> n } - ground_relations += { r -> query } // record whether a query or a ground term. - } - case WeightedRule(_,_) => () - } - println(schema) - println(ground_relations) - } - - init() - - // Given a variable, resolve it. TODO: This should give a warning, - // if we encouter a variable that is not in this map, then something - // odd has happened. - def resolveName( v : Variable ) : String = { - v match { case Variable(v,relName,i) => - if(schema contains (relName,i)) { - schema(relName,i) - } else { - return v // I do not like this default, as it allows some errors. TOOD: MAKE MORE PRECISE! 
- } - } - } - - // The default is query term. - def isQueryTerm( relName : String ): Boolean = { - if( ground_relations contains relName ) !ground_relations(relName) else true - } -} - -// This is responsible for schema elements within a given query, e.g., -// what is the canonical version of x? (i.e., the first time it is -// mentioned in the body. This is useful to translate to SQL (join -// conditions, select, etc.) -class QuerySchema(q : ConjunctiveQuery) { - var query_schema = new HashMap[ String, Tuple2[Int,Variable] ]() - - // maps each variable name to a canonical version of itself (first occurence in body in left-to-right order) - // index is the index of the subgoal/atom this variable is found in the body. - // variable is the complete Variable type for the found variable. - def generateCanonicalVar() = { - q.body.zipWithIndex.foreach { - case (Atom(relName,terms),index) => { - terms.foreach { - case Variable(v, r, i) => - if( ! (query_schema contains v) ) - query_schema += { v -> (index, Variable(v,r,i) ) } - } - } - } - } - generateCanonicalVar() // initialize - - // accessors - def getBodyIndex( varName : String ) : Int = { query_schema(varName)._1 } - def getVar(varName : String ) : Variable = { query_schema(varName)._2 } - -} -object ConjunctiveQueryParser extends ConjunctiveQueryParser { - - // This is generic code that generates the FROM with positional aliasing R0, R1, etc. - // and the corresponding WHERE clause (equating all variables) - def generateSQLBody(ss : StatementSchema, z : ConjunctiveQuery) : String = { - val bodyNames = ( z.body.zipWithIndex map { case(x,i) => s"${x.name} as R${i}"}).mkString(",") - // Simple logic for the where clause, first find every first occurence of a - // and stick it in a map. - val qs = new QuerySchema(z) - - val whereClause = z.body.zipWithIndex flatMap { - case (Atom(relName, terms),body_index) => - terms flatMap { - case Variable(varName, relName, index) => - val canonical_body_index = qs.getBodyIndex(varName) - - if (canonical_body_index != body_index) { - val real_attr_name1 = ss.resolveName( Variable(varName, relName, index) ) - val real_attr_name2 = ss.resolveName( qs.getVar(varName)) - Some(s"R${ body_index }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") - } else { None } - } - } - val whereClauseStr = whereClause match { - case Nil => "" - case _ => s"""WHERE ${whereClause.mkString(" AND ")}""" - } - - s"""FROM ${ bodyNames } ${ whereClauseStr }""" - } - // generate the node portion (V) of the factor graph - def nodeRule(ss : StatementSchema, z : ConjunctiveQuery) : String = { - val headTerms = z.head.terms map { - case Variable(v,r,i) => s"R${i}.${ss.resolveName(Variable(v,r,i)) }" - } - val headTermsStr = ( "0 as _id" :: headTerms ).mkString(",") - s"""CREATE TABLE ${ z.head.name } AS - SELECT DISTINCT ${ headTermsStr } - ${ generateSQLBody(ss,z) } - """ - } - - - // The input is a weighted rule and our goal is to generate both the - // node query and the (hyper) edge query. The node query is - // straightforward using our previous code. - - // The edge query has three parts. - - // The FROM and WHERE clause contain the same terms from node rule, with two extras. - // (1) We add the head atom into the FROM clause. - // (2) We add the join conditions to the WHERE clause between the head atom and the body. - // In the code below, we create a "fake CQ" and generate its body (ignoring the head) - - // The SELECT clause of the query is a bit interesting. 
- // (1) The SELECT clause contains the id of the head relation (if it is a query term) - // (2) The SELECT clause should also contain the weight attributes (resolved properly) - // (2) There should be an array_agg( tuple(id1,id2,..) ) of the all query relations in the body. - - // GROUP BY - // We should have a group that contains the head variable and the weight attributes. - def weightedRule( ss: StatementSchema, r : WeightedRule ) : Tuple2[Option[String], Option[String] ] = { - val node_query = if (ss.isQueryTerm(r.q.head.name)) Some(nodeRule(ss,r.q)) else None - val edge_query = { - // in the code below, we rely on the head being the last atom for indexing (since we index R{index}) - val fakeBody = r.q.body :+ r.q.head - val fakeCQ = ConjunctiveQuery(r.q.head, fakeBody) // we will just use the fakeBody below. - - // Generate the body of the query. - val qs = new QuerySchema( r.q ) - val body_attributes = r.q.body.zipWithIndex flatMap { - // check if relName is a ground term, if so skip it. - // if not, generate the id column. - case (Atom(r,_),i) => - if(ss.isQueryTerm(r)) Some(s"R${i}._id") else None - } // we know have all variables in the body - - // Construct the various terms for the select and group by - val factor_id_select = Some("0 as _fid") - val factor_id = Some("_fid") - val head_id = if (ss.isQueryTerm(r.q.head.name)) Some(s"R${ r.q.body.length }._id") else None - - // does array agg need a tuple constructor? - val array_agg = if (body_attributes.length > 0) Some(s"array_agg(${ body_attributes.mkString(", ") })") else None - - val uw_str = - r.weights match { - case None => None - case Some(w) => - val uw = w map { - case(s) => - s"R${ qs.getBodyIndex(s) }.${ ss.resolveName( qs.getVar(s) ) }" - } - Some(s"${ uw.mkString(", ") }") - } - - val select_str = (List(factor_id_select, head_id, array_agg, uw_str) flatMap { case(u) => u }).mkString(", ") - val group_str = (List(factor_id, head_id, uw_str) flatMap { case(u) => (u) }).mkString(", ") - - val u = s""" - SELECT ${select_str} - ${ generateSQLBody(ss, fakeCQ) } - GROUP BY ${group_str} """ - // if no random variables in the query then don't emit a factor term - if (ss.isQueryTerm(r.q.head.name) || body_attributes.length > 0) Some(u) else None - } - (node_query, edge_query) - } - - /* - T(base_attr); - S(a1,a2) - Q(x) :- S(x,y),T(y) - Should generate. 
- - Node query: - CREATE TABLE Q AS - SELECT 0 as _id, R0.a1 - FROM S as R0,T as R1 - WHERE R0.a2 = R1.base_attr - - Edge Query (if S and T are probabilistic) - SELECT Q._id, array_agg( (S._id, T_.id) ) - FROM Q as R0,S as R1,T as R2 - WHERE S.y = T.base_attr AND - Q.x = S.x AND Q.z = S.z - - Factor Function: OR - - ======= - R(x,y) (assume non probabilistic) - - Q(x) :- R(x,f) weight=f - - Node Query: - CREATE TABLE Q AS - SELECT DISTINCT 0 as _id, x FROM R - - Edge Query: - SELECT 0 as _fid, Q.id, R.f as w - FROM Q, R - WHERE Q.x = R.x - - ======= - - */ - def main(args: Array[String]) = { - val q = parse(statements, "S(a1,a2); R(pk,f); Q(x) :- R(x,f) weight=f; Q(x) :- S(x,y),T(y); T(base_attr)!; R(x,y) :- U(x,y); S(x,y) :- R(x,y);") - val schema = new StatementSchema( q.get ) - - val queries = q.get flatMap { - case _ : SchemaElement => None - case w : WeightedRule => - Some(weightedRule(schema,w)) - } - queries.foreach { case(query) => println(query) } - } -} diff --git a/Test.scala b/DeepDiveLogCompiler.scala similarity index 99% rename from Test.scala rename to DeepDiveLogCompiler.scala index 2696147a7..01a56a02b 100644 --- a/Test.scala +++ b/DeepDiveLogCompiler.scala @@ -251,7 +251,7 @@ class QuerySchema(q : ConjunctiveQuery) { } -object Test extends ConjunctiveQueryParser { +object ddlc extends ConjunctiveQueryParser { // This is generic code that generates the FROM with positional aliasing R0, R1, etc. // and the corresponding WHERE clause (equating all variables) diff --git a/run.sh b/run.sh index ab1252885..7c9cf44ca 100644 --- a/run.sh +++ b/run.sh @@ -1,7 +1 @@ -# scalac ConjunctiveQueryParser.scala -# scala ConjunctiveQueryParser - - -scalac Test.scala -scala Test - +sbt run From e8d68fde3f57a031be7de7729099cc8cc88f62cb Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Wed, 22 Apr 2015 23:32:31 -0700 Subject: [PATCH 022/347] Moves test input programs to separate files --- DeepDiveLogCompiler.scala | 177 ++------------------------------------ examples/test1.ddl | 4 + examples/test2.ddl | 7 ++ examples/test3.ddl | 19 ++++ examples/test4.ddl | 18 ++++ examples/test5.ddl | 16 ++++ examples/test6.ddl | 91 ++++++++++++++++++++ run.sh | 2 +- 8 files changed, 165 insertions(+), 169 deletions(-) create mode 100644 examples/test1.ddl create mode 100644 examples/test2.ddl create mode 100644 examples/test3.ddl create mode 100644 examples/test4.ddl create mode 100644 examples/test5.ddl create mode 100644 examples/test6.ddl diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 01a56a02b..81b7540b4 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -520,176 +520,17 @@ object ddlc extends ConjunctiveQueryParser { */ def main(args: Array[String]) { - val test1 = """ - S(a1,a2); - R(pk,f)!; - Q(x) :- R(x,f) weight=f; - Q2(x) :- R(x, f), S(x, y) weight = f""" - val test2 = """ - S(a1,a2); - R(pk,f); - Q(x) :- R(x,f) weight=f; - Q(x) :- S(x,y),T(y); - T(base_attr)!; - R(y,x) :- U(x,y); - S(x,y) :- R(x,y);""" - val test3 = """ - has_spouse(person1_id, person2_id, sentence_id, description, is_true, relation_id); - has_spouse_features(relation_id, feature); - q(rid)!; - - q(y) :- - has_spouse(a, b, c, d, x, y), - has_spouse_features(y, f) - weight = f - label = x; - q(y) :- - has_spouse(a, b, c, d, x, y), - has_spouse_features(y, f) - weight = f - label = x; - """ - - // f_has_spouse_symmetry(x, y) :- - // has_spouse(a1, a2, a3, a4, x, a6), - // has_spouse(a2, a1, b3, b4, y, b6) - // weight = 1; - // """ - val test4 = """ - articles(article_id, text); - 
sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id); - people_mentions(sentence_id, start_position, length, text, mention_id); - has_spouse(person1_id, person2_id, sentence_id, description, is_true, relation_id, id); - has_spouse_features(relation_id, feature); - people_mentions(sentence_id, words, ner_tags):- - sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) - udf=ext_people; - has_spouse(sentence_id, p1.mention_id, p1.text, p2.mention_id, p2.text):- - people_mentions(sentence_id, p1.start_position, p1.length, p1.text, p1.mention_id), - people_mentions(sentence_id, p2.start_position, p2.length, p2.text, p2.mention_id) - udf=ext_has_spouse; - has_spouse_features(words, relation_id, p1.start_position, p1.length, p2.start_position, p2.length):- - sentences(s.document_id, s.sentence, words, s.lemma, s.pos_tags, s.dependencies, s.ner_tags, s.sentence_offset, sentence_id), - has_spouse(person1_id, person2_id, sentence_id, h.description, h.is_true, relation_id, h.id), - people_mentions(sentence_id, p1.start_position, p1.length, p1.text, person1_id), - people_mentions(sentence_id, p2.start_position, p2.length, p2.text, person2_id) - udf=ext_has_spouse_features; - """ - - val test5 = """ - ext_people_input( - sentence_id, - words, - ner_tags). - function ext_has_spouse_features over like ext_has_spouse_features_input - returns like has_spouse_features - implementation udf/ext_has_spouse_features.py handles tsv lines. - function ext_people over like ext_people_input - returns like people_mentions - implementation udf/ext_people.py handles tsv lines. - ext_people_input(sentence_id, words, ner_tags):- - sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id). - people_mentions :- - !ext_people(ext_people_input). - people_mentions_1 :- - !ext_people(people_mentions). - """ + // get contents of all given files as one flat input program + val getContents = (filename: String) => { + val source = scala.io.Source.fromFile(filename) + try source.getLines mkString "\n" finally source.close() + } + val inputProgram = args.map(getContents).reduce(_ ++ _) - val test6 = """ - articles( - article_id text, - text text). - sentences( - document_id text, - sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], - sentence_offset int, - sentence_id text). - people_mentions( - sentence_id text, - start_position int, - length int, - text text, - mention_id text). - - has_spouse_candidates( - person1_id text, - person2_id text, - sentence_id text, - description text, - relation_id text). - has_spouse_features( - relation_id text, - feature text). - - has_spouse(relation_id text)?. - - people_mentions :- - !ext_people(ext_people_input). - - ext_people_input( - sentence_id text, - words text[], - ner_tags text[]). - - ext_people_input(s, words, ner_tags) :- - sentences(a, b, words, c, d, e, ner_tags, f, s). - - function ext_people over like ext_people_input - returns like people_mentions - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. - - has_spouse_candidates :- - !ext_has_spouse(ext_has_spouse_input). - - ext_has_spouse_input( - sentence_id text, - p1_id text, - p1_text text, - p2_id text, - p2_text text). 
- - ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- - people_mentions(s, a, b, p1_text, p1_id), - people_mentions(s, c, d, p2_text, p2_id). - - function ext_has_spouse over like ext_has_spouse_input - returns like has_spouse_candidates - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. - - has_spouse_features :- - !ext_has_spouse_features(ext_has_spouse_features_input). - - ext_has_spouse_features_input( - words text[], - relation_id text, - p1_start_position int, - p1_length int, - p2_start_position int, - p2_length int). - - ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - people_mentions(s, p1idx, p1len, k, person1_id), - people_mentions(s, p2idx, p2len, l, person2_id). - - function ext_has_spouse_features over like ext_has_spouse_features_input - returns like has_spouse_features - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. - - has_spouse(rid) :- - has_spouse_candidates(a, b, c, d, rid, l), - has_spouse_features(rid, f) - weight = f - label = l. - """ + // TODO refactor side effects out of the codegen functions + // TODO print the generated code all at once at the end println(dbSettings()) - val q = parse(statements, test6) + val q = parse(statements, inputProgram) val schema = new StatementSchema( q.get ) val variables = variableSchema(q.get, schema) var dependencies = q.get.zipWithIndex map { diff --git a/examples/test1.ddl b/examples/test1.ddl new file mode 100644 index 000000000..2e0e20fdc --- /dev/null +++ b/examples/test1.ddl @@ -0,0 +1,4 @@ + S(a1,a2); + R(pk,f)!; + Q(x) :- R(x,f) weight=f; + Q2(x) :- R(x, f), S(x, y) weight = f diff --git a/examples/test2.ddl b/examples/test2.ddl new file mode 100644 index 000000000..6add95d11 --- /dev/null +++ b/examples/test2.ddl @@ -0,0 +1,7 @@ + S(a1,a2); + R(pk,f); + Q(x) :- R(x,f) weight=f; + Q(x) :- S(x,y),T(y); + T(base_attr)!; + R(y,x) :- U(x,y); + S(x,y) :- R(x,y); diff --git a/examples/test3.ddl b/examples/test3.ddl new file mode 100644 index 000000000..a5fdbc227 --- /dev/null +++ b/examples/test3.ddl @@ -0,0 +1,19 @@ + has_spouse(person1_id, person2_id, sentence_id, description, is_true, relation_id); + has_spouse_features(relation_id, feature); + q(rid)!; + + q(y) :- + has_spouse(a, b, c, d, x, y), + has_spouse_features(y, f) + weight = f + label = x; + q(y) :- + has_spouse(a, b, c, d, x, y), + has_spouse_features(y, f) + weight = f + label = x; + + // f_has_spouse_symmetry(x, y) :- + // has_spouse(a1, a2, a3, a4, x, a6), + // has_spouse(a2, a1, b3, b4, y, b6) + // weight = 1; diff --git a/examples/test4.ddl b/examples/test4.ddl new file mode 100644 index 000000000..47989f676 --- /dev/null +++ b/examples/test4.ddl @@ -0,0 +1,18 @@ + articles(article_id, text); + sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id); + people_mentions(sentence_id, start_position, length, text, mention_id); + has_spouse(person1_id, person2_id, sentence_id, description, is_true, relation_id, id); + has_spouse_features(relation_id, feature); + people_mentions(sentence_id, words, ner_tags):- + sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) + udf=ext_people; + has_spouse(sentence_id, p1.mention_id, p1.text, p2.mention_id, p2.text):- + 
people_mentions(sentence_id, p1.start_position, p1.length, p1.text, p1.mention_id), + people_mentions(sentence_id, p2.start_position, p2.length, p2.text, p2.mention_id) + udf=ext_has_spouse; + has_spouse_features(words, relation_id, p1.start_position, p1.length, p2.start_position, p2.length):- + sentences(s.document_id, s.sentence, words, s.lemma, s.pos_tags, s.dependencies, s.ner_tags, s.sentence_offset, sentence_id), + has_spouse(person1_id, person2_id, sentence_id, h.description, h.is_true, relation_id, h.id), + people_mentions(sentence_id, p1.start_position, p1.length, p1.text, person1_id), + people_mentions(sentence_id, p2.start_position, p2.length, p2.text, person2_id) + udf=ext_has_spouse_features; diff --git a/examples/test5.ddl b/examples/test5.ddl new file mode 100644 index 000000000..55144dda4 --- /dev/null +++ b/examples/test5.ddl @@ -0,0 +1,16 @@ + ext_people_input( + sentence_id, + words, + ner_tags). + function ext_has_spouse_features over like ext_has_spouse_features_input + returns like has_spouse_features + implementation udf/ext_has_spouse_features.py handles tsv lines. + function ext_people over like ext_people_input + returns like people_mentions + implementation udf/ext_people.py handles tsv lines. + ext_people_input(sentence_id, words, ner_tags):- + sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id). + people_mentions :- + !ext_people(ext_people_input). + people_mentions_1 :- + !ext_people(people_mentions). diff --git a/examples/test6.ddl b/examples/test6.ddl new file mode 100644 index 000000000..753bea2e5 --- /dev/null +++ b/examples/test6.ddl @@ -0,0 +1,91 @@ +articles( + article_id text, + text text). +sentences( + document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text). +people_mentions( + sentence_id text, + start_position int, + length int, + text text, + mention_id text). + +has_spouse_candidates( + person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text). +has_spouse_features( + relation_id text, + feature text). + +has_spouse(relation_id text)?. + +people_mentions :- + !ext_people(ext_people_input). + +ext_people_input( + sentence_id text, + words text[], + ner_tags text[]). + +ext_people_input(s, words, ner_tags) :- + sentences(a, b, words, c, d, e, ner_tags, f, s). + +function ext_people over like ext_people_input + returns like people_mentions + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. + +has_spouse_candidates :- + !ext_has_spouse(ext_has_spouse_input). + +ext_has_spouse_input( + sentence_id text, + p1_id text, + p1_text text, + p2_id text, + p2_text text). + +ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + people_mentions(s, a, b, p1_text, p1_id), + people_mentions(s, c, d, p2_text, p2_id). + +function ext_has_spouse over like ext_has_spouse_input + returns like has_spouse_candidates + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. + +has_spouse_features :- + !ext_has_spouse_features(ext_has_spouse_features_input). + +ext_has_spouse_features_input( + words text[], + relation_id text, + p1_start_position int, + p1_length int, + p2_start_position int, + p2_length int). 
+ +ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +function ext_has_spouse_features over like ext_has_spouse_features_input + returns like has_spouse_features + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. + +has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), + has_spouse_features(rid, f) +weight = f +label = l. diff --git a/run.sh b/run.sh index 7c9cf44ca..fece1865a 100644 --- a/run.sh +++ b/run.sh @@ -1 +1 @@ -sbt run +sbt "run examples/test6.ddl" From 055345a30cb02ef7b7fbd3eaa9ac3fbd58b30cdf Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Wed, 22 Apr 2015 23:34:00 -0700 Subject: [PATCH 023/347] Runs a packaged jar instead of doing sbt run --- .gitignore | 1 + run.sh | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index ea8c4bf7f..e6525fdb8 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ +/ddlc.jar /target diff --git a/run.sh b/run.sh index fece1865a..6a4748fd8 100644 --- a/run.sh +++ b/run.sh @@ -1 +1,2 @@ -sbt "run examples/test6.ddl" +sbt package +scala target/scala-*/*.jar examples/test6.ddl From b5f81d8db7f5eb28a88812f66bac874b9a75124d Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Wed, 22 Apr 2015 23:45:45 -0700 Subject: [PATCH 024/347] Turns run.sh into a Makefile with README --- Makefile | 12 ++++++++++++ README.md | 16 ++++++++++++++++ run.sh | 2 -- 3 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 Makefile create mode 100644 README.md delete mode 100644 run.sh diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..b498c074e --- /dev/null +++ b/Makefile @@ -0,0 +1,12 @@ +# Makefile for DeepDiveLogCompiler + +JAR = ddlc.jar + +test: $(JAR) + scala $(JAR) examples/test6.ddl + +$(JAR): $(wildcard *.scala) + sbt package + jar=(target/scala-*/*.jar); ln -sfn $${jar[0]} $(JAR) + touch $(JAR) + diff --git a/README.md b/README.md new file mode 100644 index 000000000..87f79a46d --- /dev/null +++ b/README.md @@ -0,0 +1,16 @@ +DeepDiveLogCompiler +=================== + +A compiler that enables writing DeepDive apps in a Datalog-like syntax. + +## Building + +```bash +make +``` + +## Running +The following will generate an application.conf for the [spouse example in DeepDive's tutorial](http://deepdive.stanford.edu/doc/basics/walkthrough/walkthrough.html). 
+```bash +scala ddlc.jar examples/spouse_example.ddl >application.conf +``` diff --git a/run.sh b/run.sh deleted file mode 100644 index 6a4748fd8..000000000 --- a/run.sh +++ /dev/null @@ -1,2 +0,0 @@ -sbt package -scala target/scala-*/*.jar examples/test6.ddl From 8ebad42f3834f6499c57e64b6d1a3979a251aa9e Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 02:07:57 -0700 Subject: [PATCH 025/347] Adds an expected output that will guide refactor --- Makefile | 2 +- examples/test6.expected | 104 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 105 insertions(+), 1 deletion(-) create mode 100644 examples/test6.expected diff --git a/Makefile b/Makefile index b498c074e..adf97c16c 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ JAR = ddlc.jar test: $(JAR) - scala $(JAR) examples/test6.ddl + scala $(JAR) examples/test6.ddl | diff -u examples/test6.expected - $(JAR): $(wildcard *.scala) sbt package diff --git a/examples/test6.expected b/examples/test6.expected new file mode 100644 index 000000000..6429ede0e --- /dev/null +++ b/examples/test6.expected @@ -0,0 +1,104 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + } + + + deepdive.schema.variables { + has_spouse.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_6 { + input: """ SELECT * FROM ext_people_input + """ + output_relation: "people_mentions" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_8" ] + } + + + deepdive.extraction.extractors.extraction_rule_8 { + sql: """ DROP VIEW IF EXISTS ext_people_input; + CREATE VIEW ext_people_input AS + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" + FROM sentences R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_10 { + input: """ SELECT * FROM ext_has_spouse_input + """ + output_relation: "has_spouse_candidates" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_12" ] + } + + + deepdive.extraction.extractors.extraction_rule_12 { + sql: """ DROP VIEW IF EXISTS ext_has_spouse_input; + CREATE VIEW ext_has_spouse_input AS + SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" + FROM people_mentions R0, people_mentions R1 + WHERE R1.sentence_id = R0.sentence_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_6" ] + } + + + deepdive.extraction.extractors.extraction_rule_14 { + input: """ SELECT * FROM ext_has_spouse_features_input + """ + output_relation: "has_spouse_features" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_16" ] + } + + + deepdive.extraction.extractors.extraction_rule_16 { + sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; + CREATE VIEW ext_has_spouse_features_input AS + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , 
R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_10" , "extraction_rule_6" ] + } + + + deepdive.extraction.extractors.extraction_rule_has_spouse { + sql: """ DROP TABLE IF EXISTS has_spouse; + CREATE TABLE has_spouse AS + SELECT DISTINCT 0 as id, R0.relation_id, R0.l AS label + FROM has_spouse_candidates R0, has_spouse_features R1 + WHERE R1.relation_id = R0.relation_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_14" , "extraction_rule_10" ] + } + + + deepdive.inference.factors.factor_has_spouse { + input_query: """ + SELECT R0.id AS "has_spouse.R0.id" , R0.label AS "has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" + FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + function: "Imply(has_spouse.R0.label)" + weight: "?(has_spouse_features.R2.feature)" + } + From c733cafcd914736168f91a808fe79154f36e3853 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Fri, 24 Apr 2015 00:32:51 -0700 Subject: [PATCH 026/347] Separates make rules for ddlc-test.jar and ddlc.jar --- .gitignore | 3 ++- Makefile | 24 ++++++++++++++++++------ 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index e6525fdb8..a57fd1549 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ /ddlc.jar -/target +/ddlc-test.jar +target diff --git a/Makefile b/Makefile index adf97c16c..5a36a213a 100644 --- a/Makefile +++ b/Makefile @@ -1,12 +1,24 @@ # Makefile for DeepDiveLogCompiler -JAR = ddlc.jar +TESTJAR = ddlc-test.jar +TEST = examples/test6.ddl -test: $(JAR) - scala $(JAR) examples/test6.ddl | diff -u examples/test6.expected - +test: $(TESTJAR) + CLASSPATH=$(shell sbt "export compile:dependency-classpath" | tail -1) \ + scala $< $(TEST) | diff -u $(TEST:.ddl=.expected) - +$(TESTJAR): $(wildcard *.scala) + sbt package + ln -sfn $(shell ls -t target/scala-*/*_*.jar | head -1) $@ + touch $@ +# standalone jar +JAR = ddlc.jar +test-package: $(JAR) + scala $< $(TEST) | diff -u $(TEST:.ddl=.expected) - $(JAR): $(wildcard *.scala) - sbt package - jar=(target/scala-*/*.jar); ln -sfn $${jar[0]} $(JAR) - touch $(JAR) + sbt assembly + ln -sfn $(shell ls -t target/scala-*/*-assembly-*.jar | head -1) $@ + touch $@ +clean: + sbt clean From 1a3aeb01f9c215ecd6e1be0d66d9cd53f353d3ed Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 01:50:00 -0700 Subject: [PATCH 027/347] Makes compilation side effect free by moving all println()s to the end of main(), and making their type uniform to produce a list of application.conf blocks (List[String]). 
--- DeepDiveLogCompiler.scala | 105 ++++++++++++++++++++++++-------------- 1 file changed, 66 insertions(+), 39 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 81b7540b4..690207bec 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -187,8 +187,6 @@ class StatementSchema( statements : List[Statement] ) { case FunctionElement(a, b, c, d, e) => function_schema += {a -> FunctionElement(a, b, c, d, e)} case FunctionRule(_,_,_) => () } - // println(schema) - // println(ground_relations) } init() @@ -250,8 +248,28 @@ class QuerySchema(q : ConjunctiveQuery) { } +// The compiler +object DeepDiveLogCompiler { -object ddlc extends ConjunctiveQueryParser { + def parseArgs(args: Array[String]) = { + val getContents = (filename: String) => { + val source = scala.io.Source.fromFile(filename) + try source.getLines mkString "\n" finally source.close() + } + args.map(getContents).reduce(_ ++ _) + } + + val parser = new ConjunctiveQueryParser + def parseProgram(inputProgram: String) = parser.parse(parser.statements, inputProgram) + + type CompiledBlocks = List[String] + class CompilationState( + // schema derived from all statements + ss: Option[StatementSchema], + // all head names + headNames: List[String], + // a handy counter for generating unique names + numRules: Int) // This is generic code that generates the FROM with positional aliasing R0, R1, etc. // and the corresponding WHERE clause (equating all variables) @@ -283,7 +301,7 @@ object ddlc extends ConjunctiveQueryParser { ${ whereClauseStr }""" } // generate the node portion (V) of the factor graph - def nodeRule(ss : StatementSchema, qs: QuerySchema, z : InferenceRule, dep: List[(Int, String)]) : String = { + def compileNodeRule(z: InferenceRule, qs: QuerySchema, ss: StatementSchema, dep: List[(Int, String)]) : CompiledBlocks = { val headTerms = z.q.head.terms map { case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" } @@ -312,12 +330,11 @@ object ddlc extends ConjunctiveQueryParser { ${dependencyStr} } """ - println(ext) - ext + List(ext) } // generate variable schema statements - def variableSchema(statements : List[Statement], ss: StatementSchema) : String = { + def compileVariableSchema(statements: List[Statement], ss: StatementSchema): CompiledBlocks = { var schema = Set[String]() // generate the statements. statements.foreach { @@ -331,12 +348,11 @@ object ddlc extends ConjunctiveQueryParser { ${schema.mkString("\n")} } """ - println(ddSchema) - ddSchema + List(ddSchema) } // Generate extraction rule part for deepdive - def extractionRule( ss: StatementSchema, em: List[(Int, String)], r : ExtractionRule, index : Int) : String = { + def compile(r: ExtractionRule, index: Int, ss: StatementSchema, em: List[(Int, String)]): CompiledBlocks = { // Generate the body of the query. 
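    // For example, the test6.ddl rule
    //   ext_people_input(s, words, ner_tags) :- sentences(a, b, words, c, d, e, ner_tags, f, s).
    // compiles (roughly) into a sql_extractor block whose query is
    //   CREATE VIEW ext_people_input AS
    //   SELECT R0.sentence_id AS "sentences.R0.sentence_id", ... FROM sentences R0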
val qs = new QuerySchema( r.q ) // variable columns @@ -369,11 +385,10 @@ object ddlc extends ConjunctiveQueryParser { ${dependencyStr} } """ - println(extractor) - extractor + List(extractor) } - def functionRule( ss: StatementSchema, dependencies: List[(Int, String)], r : FunctionRule, index : Int) : String = { + def compile(r: FunctionRule, index: Int, ss: StatementSchema, dependencies: List[(Int, String)]): CompiledBlocks = { val inputQuery = s""" SELECT * FROM ${r.input} @@ -402,8 +417,7 @@ object ddlc extends ConjunctiveQueryParser { ${dependencyStr} } """ - println(extractor) - extractor + List(extractor) } @@ -420,11 +434,13 @@ object ddlc extends ConjunctiveQueryParser { } // generate inference rule part for deepdive - def inferenceRule(ss : StatementSchema, r : InferenceRule, dep : List[(Int, String)]) : String = { + def compile(r: InferenceRule, i: Int, ss: StatementSchema, dep: List[(Int, String)]): CompiledBlocks = { + var blocks = List[String]() val qs = new QuerySchema( r.q ) // node query - val node_query = if (ss.isQueryTerm(r.q.head.name)) Some(nodeRule(ss,qs,r, dep)) else None + if (ss.isQueryTerm(r.q.head.name)) + blocks :::= compileNodeRule(r, qs, ss, dep) // edge query val fakeBody = r.q.head +: r.q.body @@ -459,16 +475,15 @@ object ddlc extends ConjunctiveQueryParser { } } - val rule = s""" + blocks ::= s""" deepdive.inference.factors.factor_${r.q.head.name} { input_query: \"\"\"${inputQuery}\"\"\" function: "${func}" weight: "${weight}" } """ - println(rule) - return inputQuery + blocks.reverse } def dbSettings() : String = """ @@ -521,29 +536,41 @@ object ddlc extends ConjunctiveQueryParser { */ def main(args: Array[String]) { // get contents of all given files as one flat input program - val getContents = (filename: String) => { - val source = scala.io.Source.fromFile(filename) - try source.getLines mkString "\n" finally source.close() - } - val inputProgram = args.map(getContents).reduce(_ ++ _) - - // TODO refactor side effects out of the codegen functions - // TODO print the generated code all at once at the end - println(dbSettings()) - val q = parse(statements, inputProgram) - val schema = new StatementSchema( q.get ) - val variables = variableSchema(q.get, schema) - var dependencies = q.get.zipWithIndex map { + val inputProgram = parseArgs(args) + val parsedProgram = parseProgram(inputProgram) + + // take an initial pass to analyze the parsed program + val schema = new StatementSchema( parsedProgram.get ) + // TODO analyze the real dependency (Map[headname, List[headname]]) here + var dependencies = parsedProgram.get.zipWithIndex map { case (e : ExtractionRule, i) => (i, e.q.head.name) case (f : FunctionRule, i) => (i, f.output) case (w : InferenceRule, i) => (i, w.q.head.name) case (_,_) => (-1, "-1") } - val queries = q.get.zipWithIndex flatMap { - case (e : ExtractionRule, i) => Some(extractionRule(schema, dependencies, e, i)) - case (w : InferenceRule, i) => Some(inferenceRule(schema, w, dependencies)) - case (f : FunctionRule, i) => Some(functionRule(schema, dependencies, f, i)) - case (_,_) => None - } + + // compile the program into blocks of application.conf + val compiledBlocks = ( + compileVariableSchema(parsedProgram.get, schema) + ::: + ( + parsedProgram.get.zipWithIndex flatMap { + // XXX Ideally, a single compile call should handle all the polymorphic + // cases, but Scala/Java's ad-hoc polymorphism doesn't work that way. + // Instead, we need to use the visitor pattern, adding compile(...) + // methods to all case classes of Statement. 
+ // TODO move schema, dependencies args into a composite field of type CompilationState + // TODO get rid of zipWithIndex by keeping a counter in the CompilationState + case (s:InferenceRule , i:Int) => compile(s, i, schema, dependencies) + case (s:ExtractionRule, i:Int) => compile(s, i, schema, dependencies) + case (s:FunctionRule , i:Int) => compile(s, i, schema, dependencies) + case _ => List() + } + ) + ) + + // emit the generated code + println(dbSettings()) // TODO read user's proto-application.conf and augment it + compiledBlocks foreach println } } From 951437ca244a7d86e41d8b468ca0ee32932e3aa1 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 01:52:20 -0700 Subject: [PATCH 028/347] Cleans trailing white spaces --- DeepDiveLogCompiler.scala | 82 +++++++++++++++++++-------------------- examples/test6.expected | 10 ++--- 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 690207bec..56df143d5 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -10,25 +10,25 @@ import scala.collection.immutable.HashMap And queries Q(x,y) :- R(x,y), SomeOther(y, z) - + Using the schema can SQLized as - + SELECT R1.x,R2.y FROM R as R1,SomeOther as R2 WHERE R1.y = R2.realname We translate by introducing aliases R1, R2 , etc. to deal with repeated symbols. - - TODO: + + TODO: ================= Our schema needs to know whether a symbol is this a query table (and so should contain an _id) field or is a regular table from the - user. + user. If a head term is not mentioned in the schema, its assumed it is a - query table that this code must create. + query table that this code must create. If one wants to explicilty mention a query table in the schema, they do so with a trailing exclamation point as follows @@ -37,10 +37,10 @@ import scala.collection.immutable.HashMap Consider - Q(x) :- R(x,f) weight=f + Q(x) :- R(x,f) weight=f ... R is likely *not* a variable table ... we record its translation below. - + In contrast, Q(x) :- R(x),S(x) ... coule be treated as variable tables. Hence, the schema has: R(x,f) // regular table @@ -58,9 +58,9 @@ Consider // * The union types for for the parser. * // *************************************** trait Statement -case class Variable(varName : String, relName : String, index : Int ) -case class Atom(name : String, terms : List[Variable]) -case class Attribute(name : String, terms : List[Variable], types : List[String]) +case class Variable(varName : String, relName : String, index : Int ) +case class Atom(name : String, terms : List[Variable]) +case class Attribute(name : String, terms : List[Variable], types : List[String]) case class ConjunctiveQuery(head: Atom, body: List[Atom]) case class Column(name : String, t : String) @@ -82,7 +82,7 @@ case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervisi // Parser -class ConjunctiveQueryParser extends JavaTokenParsers { +class ConjunctiveQueryParser extends JavaTokenParsers { // Odd definitions, but we'll keep them. 
// def stringliteral1: Parser[String] = ("'"+"""([^'\p{Cntrl}\\]|\\[\\"'bfnrt]|\\u[a-fA-F0-9]{4})*"""+"'").r ^^ {case (x) => x} // def stringliteral2: Parser[String] = """[a-zA-Z_0-9\./]*""".r ^^ {case (x) => x} @@ -124,10 +124,10 @@ class ConjunctiveQueryParser extends JavaTokenParsers { } - def functionElement : Parser[FunctionElement] = "function" ~ stringliteral ~ - "over like" ~ stringliteral ~ "returns like" ~ stringliteral ~ "implementation" ~ + def functionElement : Parser[FunctionElement] = "function" ~ stringliteral ~ + "over like" ~ stringliteral ~ "returns like" ~ stringliteral ~ "implementation" ~ "\"" ~ path ~ "\"" ~ "handles" ~ stringliteral ~ "lines" ^^ { - case ("function" ~ a ~ "over like" ~ b ~ "returns like" ~ c ~ "implementation" ~ + case ("function" ~ a ~ "over like" ~ b ~ "returns like" ~ c ~ "implementation" ~ "\"" ~ d ~ "\"" ~ "handles" ~ e ~ "lines") => FunctionElement(a, b, c, d, e) } @@ -173,7 +173,7 @@ class StatementSchema( statements : List[Statement] ) { var function_schema : Map[String, FunctionElement] = new HashMap[ String, FunctionElement]() - def init() = { + def init() = { // generate the statements. statements.foreach { case SchemaElement(Attribute(r, terms, types),query) => @@ -211,7 +211,7 @@ class StatementSchema( statements : List[Statement] ) { return FunctionElement("0","0","0","0","0") } - } + } // The default is query term. def isQueryTerm( relName : String ): Boolean = { @@ -225,7 +225,7 @@ class StatementSchema( statements : List[Statement] ) { // conditions, select, etc.) class QuerySchema(q : ConjunctiveQuery) { var query_schema = new HashMap[ String, Tuple2[Int,Variable] ]() - + // maps each variable name to a canonical version of itself (first occurence in body in left-to-right order) // index is the index of the subgoal/atom this variable is found in the body. // variable is the complete Variable type for the found variable. @@ -311,11 +311,11 @@ object DeepDiveLogCompiler { val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") val query = s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ generateSQLBody(ss,z.q) }""" - + val dependencyRelation = z.q.body map { case(x) => s"${x.name}"} var dependencies = List[String]() for (e <- dep) { - if (dependencyRelation contains e._2) + if (dependencyRelation contains e._2) dependencies ::= s""" "extraction_rule_${e._1}" """ } val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" @@ -323,7 +323,7 @@ object DeepDiveLogCompiler { val ext = s""" deepdive.extraction.extractors.extraction_rule_${z.q.head.name} { sql: \"\"\" DROP TABLE IF EXISTS ${z.q.head.name}; - CREATE TABLE ${z.q.head.name} AS + CREATE TABLE ${z.q.head.name} AS ${query} \"\"\" style: "sql_extractor" @@ -335,7 +335,7 @@ object DeepDiveLogCompiler { // generate variable schema statements def compileVariableSchema(statements: List[Statement], ss: StatementSchema): CompiledBlocks = { - var schema = Set[String]() + var schema = Set[String]() // generate the statements. 
statements.foreach { case InferenceRule(q, weights, supervision) => @@ -361,17 +361,17 @@ object DeepDiveLogCompiler { } val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None - + val selectStr = (List(variableColsStr) flatMap (u => u)).mkString(", ") - + val inputQuery = s""" - SELECT ${selectStr} + SELECT ${selectStr} ${ generateSQLBody(ss, r.q) }""" val dependencyRelation = r.q.body map { case(x) => s"${x.name}"} var dependencies = List[String]() for (e <- em) { - if (dependencyRelation contains e._2) + if (dependencyRelation contains e._2) dependencies ::= s""" "extraction_rule_${e._1}" """ } val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" @@ -389,12 +389,12 @@ object DeepDiveLogCompiler { } def compile(r: FunctionRule, index: Int, ss: StatementSchema, dependencies: List[(Int, String)]): CompiledBlocks = { - + val inputQuery = s""" SELECT * FROM ${r.input} """ - val function = ss.resolveFunctionName(r.function) + val function = ss.resolveFunctionName(r.function) // val dependencyRelation = r.q.body map { case(x) => s"${x.name}"} var dependency = List[String]() @@ -406,7 +406,7 @@ object DeepDiveLogCompiler { val dependencyStr = if (dependency.length > 0) s"dependencies: [${dependency.mkString(", ")}]" else "" - + val extractor = s""" deepdive.extraction.extractors.extraction_rule_${index} { input: \"\"\" SELECT * FROM ${r.input} @@ -443,7 +443,7 @@ object DeepDiveLogCompiler { blocks :::= compileNodeRule(r, qs, ss, dep) // edge query - val fakeBody = r.q.head +: r.q.body + val fakeBody = r.q.head +: r.q.body val fakeCQ = ConjunctiveQuery(r.q.head, fakeBody) // we will just use the fakeBody below. val index = r.q.body.length + 1 @@ -461,7 +461,7 @@ object DeepDiveLogCompiler { // factor input query val inputQuery = s""" - SELECT ${selectStr} + SELECT ${selectStr} ${ generateSQLBody(ss, fakeCQ) }""" // factor function @@ -474,7 +474,7 @@ object DeepDiveLogCompiler { s"""?(${w.flatMap(s => resolveColumn(s, ss, qs2, fakeCQ, false)).mkString(", ")})""" } } - + blocks ::= s""" deepdive.inference.factors.factor_${r.q.head.name} { input_query: \"\"\"${inputQuery}\"\"\" @@ -500,21 +500,21 @@ object DeepDiveLogCompiler { /* T(base_attr); S(a1,a2) - Q(x) :- S(x,y),T(y) + Q(x) :- S(x,y),T(y) Should generate. 
Node query: CREATE TABLE Q AS SELECT 0 as _id, R0.a1 - FROM S as R0,T as R1 + FROM S as R0,T as R1 WHERE R0.a2 = R1.base_attr - + Edge Query (if S and T are probabilistic) SELECT Q._id, array_agg( (S._id, T_.id) ) - FROM Q as R0,S as R1,T as R2 - WHERE S.y = T.base_attr AND - Q.x = S.x AND Q.z = S.z - + FROM Q as R0,S as R1,T as R2 + WHERE S.y = T.base_attr AND + Q.x = S.x AND Q.z = S.z + Factor Function: OR ======= @@ -525,14 +525,14 @@ object DeepDiveLogCompiler { Node Query: CREATE TABLE Q AS SELECT DISTINCT 0 as _id, x FROM R - + Edge Query: SELECT 0 as _fid, Q.id, R.f as w FROM Q, R WHERE Q.x = R.x ======= - + */ def main(args: Array[String]) { // get contents of all given files as one flat input program diff --git a/examples/test6.expected b/examples/test6.expected index 6429ede0e..984ad5e6c 100644 --- a/examples/test6.expected +++ b/examples/test6.expected @@ -28,7 +28,7 @@ deepdive.extraction.extractors.extraction_rule_8 { sql: """ DROP VIEW IF EXISTS ext_people_input; CREATE VIEW ext_people_input AS - SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" FROM sentences R0 """ @@ -50,7 +50,7 @@ deepdive.extraction.extractors.extraction_rule_12 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_input; CREATE VIEW ext_has_spouse_input AS - SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" + SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" FROM people_mentions R0, people_mentions R1 WHERE R1.sentence_id = R0.sentence_id """ @@ -72,7 +72,7 @@ deepdive.extraction.extractors.extraction_rule_16 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; CREATE VIEW ext_has_spouse_features_input AS - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ @@ -83,7 +83,7 @@ deepdive.extraction.extractors.extraction_rule_has_spouse { sql: """ DROP TABLE IF EXISTS has_spouse; - CREATE TABLE has_spouse AS + CREATE TABLE has_spouse AS SELECT DISTINCT 0 as id, R0.relation_id, R0.l AS label FROM has_spouse_candidates R0, has_spouse_features R1 WHERE R1.relation_id = R0.relation_id @@ -95,7 +95,7 @@ deepdive.inference.factors.factor_has_spouse { input_query: """ - SELECT R0.id AS "has_spouse.R0.id" , R0.label AS 
"has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" + SELECT R0.id AS "has_spouse.R0.id" , R0.label AS "has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(has_spouse.R0.label)" From a62c1e9518d2d11886844d1b58998dd9aff48e48 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 02:04:54 -0700 Subject: [PATCH 029/347] Renames StatementSchema to CompilationState --- DeepDiveLogCompiler.scala | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 56df143d5..a25b47f27 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -156,13 +156,14 @@ class ConjunctiveQueryParser extends JavaTokenParsers { // rules or schema elements in aribitrary order def statement : Parser[Statement] = (functionElement | inferenceRule | extractionRule | functionRule | schemaElement) ^^ {case(x) => x} - def statements : Parser[List[Statement]] = rep1sep(statement, ".") ^^ { case(x) => x } + type Program = List[Statement] + def statements : Parser[Program] = rep1sep(statement, ".") ^^ { case(x) => x } } // This handles the schema statements. // It can tell you if a predicate is a "query" predicate or a "ground prediate" // and it resolves Variables their correct and true name in the schema, i.e. R(x,y) then x could be Attribute1 declared. -class StatementSchema( statements : List[Statement] ) { +class CompilationState( statements : List[Statement] ) { // TODO: refactor the schema into a class that constructs and // manages these maps. Also it should have appropriate // abstractions and error handling for missing values. @@ -263,17 +264,10 @@ object DeepDiveLogCompiler { def parseProgram(inputProgram: String) = parser.parse(parser.statements, inputProgram) type CompiledBlocks = List[String] - class CompilationState( - // schema derived from all statements - ss: Option[StatementSchema], - // all head names - headNames: List[String], - // a handy counter for generating unique names - numRules: Int) // This is generic code that generates the FROM with positional aliasing R0, R1, etc. // and the corresponding WHERE clause (equating all variables) - def generateSQLBody(ss : StatementSchema, z : ConjunctiveQuery) : String = { + def generateSQLBody(ss : CompilationState, z : ConjunctiveQuery) : String = { val bodyNames = ( z.body.zipWithIndex map { case(x,i) => s"${x.name} R${i}"}).mkString(", ") // Simple logic for the where clause, first find every first occurence of a // and stick it in a map. 
@@ -301,7 +295,7 @@ object DeepDiveLogCompiler { ${ whereClauseStr }""" } // generate the node portion (V) of the factor graph - def compileNodeRule(z: InferenceRule, qs: QuerySchema, ss: StatementSchema, dep: List[(Int, String)]) : CompiledBlocks = { + def compileNodeRule(z: InferenceRule, qs: QuerySchema, ss: CompilationState, dep: List[(Int, String)]) : CompiledBlocks = { val headTerms = z.q.head.terms map { case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" } @@ -334,7 +328,7 @@ object DeepDiveLogCompiler { } // generate variable schema statements - def compileVariableSchema(statements: List[Statement], ss: StatementSchema): CompiledBlocks = { + def compileVariableSchema(statements: List[Statement], ss: CompilationState): CompiledBlocks = { var schema = Set[String]() // generate the statements. statements.foreach { @@ -352,7 +346,7 @@ object DeepDiveLogCompiler { } // Generate extraction rule part for deepdive - def compile(r: ExtractionRule, index: Int, ss: StatementSchema, em: List[(Int, String)]): CompiledBlocks = { + def compile(r: ExtractionRule, index: Int, ss: CompilationState, em: List[(Int, String)]): CompiledBlocks = { // Generate the body of the query. val qs = new QuerySchema( r.q ) // variable columns @@ -388,7 +382,7 @@ object DeepDiveLogCompiler { List(extractor) } - def compile(r: FunctionRule, index: Int, ss: StatementSchema, dependencies: List[(Int, String)]): CompiledBlocks = { + def compile(r: FunctionRule, index: Int, ss: CompilationState, dependencies: List[(Int, String)]): CompiledBlocks = { val inputQuery = s""" SELECT * FROM ${r.input} @@ -422,7 +416,7 @@ object DeepDiveLogCompiler { // resolve a column name with alias - def resolveColumn(s: String, ss: StatementSchema, qs: QuerySchema, q : ConjunctiveQuery, + def resolveColumn(s: String, ss: CompilationState, qs: QuerySchema, q : ConjunctiveQuery, alias: Boolean) : Option[String] = { val index = qs.getBodyIndex(s) val name = ss.resolveName(qs.getVar(s)) @@ -434,7 +428,7 @@ object DeepDiveLogCompiler { } // generate inference rule part for deepdive - def compile(r: InferenceRule, i: Int, ss: StatementSchema, dep: List[(Int, String)]): CompiledBlocks = { + def compile(r: InferenceRule, i: Int, ss: CompilationState, dep: List[(Int, String)]): CompiledBlocks = { var blocks = List[String]() val qs = new QuerySchema( r.q ) @@ -540,7 +534,7 @@ object DeepDiveLogCompiler { val parsedProgram = parseProgram(inputProgram) // take an initial pass to analyze the parsed program - val schema = new StatementSchema( parsedProgram.get ) + val state = new CompilationState( parsedProgram.get ) // TODO analyze the real dependency (Map[headname, List[headname]]) here var dependencies = parsedProgram.get.zipWithIndex map { case (e : ExtractionRule, i) => (i, e.q.head.name) @@ -551,7 +545,7 @@ object DeepDiveLogCompiler { // compile the program into blocks of application.conf val compiledBlocks = ( - compileVariableSchema(parsedProgram.get, schema) + compileVariableSchema(parsedProgram.get, state) ::: ( parsedProgram.get.zipWithIndex flatMap { @@ -559,11 +553,11 @@ object DeepDiveLogCompiler { // cases, but Scala/Java's ad-hoc polymorphism doesn't work that way. // Instead, we need to use the visitor pattern, adding compile(...) // methods to all case classes of Statement. 
- // TODO move schema, dependencies args into a composite field of type CompilationState + // TODO move state, dependencies args into a composite field of type CompilationState // TODO get rid of zipWithIndex by keeping a counter in the CompilationState - case (s:InferenceRule , i:Int) => compile(s, i, schema, dependencies) - case (s:ExtractionRule, i:Int) => compile(s, i, schema, dependencies) - case (s:FunctionRule , i:Int) => compile(s, i, schema, dependencies) + case (s:InferenceRule , i:Int) => compile(s, i, state, dependencies) + case (s:ExtractionRule, i:Int) => compile(s, i, state, dependencies) + case (s:FunctionRule , i:Int) => compile(s, i, state, dependencies) case _ => List() } ) From 5a37d06ec3c7d61950124dc6c1f533aa2b13df69 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 02:08:40 -0700 Subject: [PATCH 030/347] Moves resolveColumn to CompilationState --- DeepDiveLogCompiler.scala | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index a25b47f27..677e5a114 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -218,6 +218,18 @@ class CompilationState( statements : List[Statement] ) { def isQueryTerm( relName : String ): Boolean = { if( ground_relations contains relName ) !ground_relations(relName) else true } + + // resolve a column name with alias + def resolveColumn(s: String, qs: QuerySchema, q : ConjunctiveQuery, alias: Boolean) : Option[String] = { + val index = qs.getBodyIndex(s) + val name = resolveName(qs.getVar(s)) + val relation = q.body(index).name + if (alias) + Some(s"""R${index}.${name} AS "${relation}.R${index}.${name}" """) + else + Some(s"${relation}.R${index}.${name}") + } + } // This is responsible for schema elements within a given query, e.g., @@ -351,7 +363,7 @@ object DeepDiveLogCompiler { val qs = new QuerySchema( r.q ) // variable columns val variableCols = r.q.head.terms flatMap { - case(Variable(v,rr,i)) => resolveColumn(v, ss, qs, r.q, true) + case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, r.q, true) } val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None @@ -415,18 +427,6 @@ object DeepDiveLogCompiler { } - // resolve a column name with alias - def resolveColumn(s: String, ss: CompilationState, qs: QuerySchema, q : ConjunctiveQuery, - alias: Boolean) : Option[String] = { - val index = qs.getBodyIndex(s) - val name = ss.resolveName(qs.getVar(s)) - val relation = q.body(index).name - if (alias) - Some(s"""R${index}.${name} AS "${relation}.R${index}.${name}" """) - else - Some(s"${relation}.R${index}.${name}") - } - // generate inference rule part for deepdive def compile(r: InferenceRule, i: Int, ss: CompilationState, dep: List[(Int, String)]): CompiledBlocks = { var blocks = List[String]() @@ -448,7 +448,7 @@ object DeepDiveLogCompiler { // weight string val uwStr = r.weights match { case KnownFactorWeight(x) => None - case UnknownFactorWeight(w) => Some(w.flatMap(s => resolveColumn(s, ss, qs2, fakeCQ, true)).mkString(", ")) + case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, true)).mkString(", ")) } val selectStr = (List(variableIdsStr, variableColsStr, uwStr) flatMap (u => u)).mkString(", ") @@ -465,7 +465,7 @@ object DeepDiveLogCompiler { val weight = r.weights match { case KnownFactorWeight(x) => s"${x}" case UnknownFactorWeight(w) => { - s"""?(${w.flatMap(s => resolveColumn(s, ss, qs2, fakeCQ, false)).mkString(", ")})""" + 
s"""?(${w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, false)).mkString(", ")})""" } } From bcb994cc1d5807fb0b4e6f05c81fd807b2d8efbc Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 02:26:33 -0700 Subject: [PATCH 031/347] Reorders pieces of codes around - Renames dbSettings to compileUserSettings. - Folds compileNodeRule into compile(InferenceRule, ...) --- DeepDiveLogCompiler.scala | 189 ++++++++++++++++---------------------- 1 file changed, 77 insertions(+), 112 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 677e5a114..151451198 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -230,6 +230,35 @@ class CompilationState( statements : List[Statement] ) { Some(s"${relation}.R${index}.${name}") } + // This is generic code that generates the FROM with positional aliasing R0, R1, etc. + // and the corresponding WHERE clause (equating all variables) + def generateSQLBody(z : ConjunctiveQuery) : String = { + val bodyNames = ( z.body.zipWithIndex map { case(x,i) => s"${x.name} R${i}"}).mkString(", ") + // Simple logic for the where clause, first find every first occurence of a + // and stick it in a map. + val qs = new QuerySchema(z) + + val whereClause = z.body.zipWithIndex flatMap { + case (Atom(relName, terms),body_index) => + terms flatMap { + case Variable(varName, relName, index) => + val canonical_body_index = qs.getBodyIndex(varName) + + if (canonical_body_index != body_index) { + val real_attr_name1 = resolveName( Variable(varName, relName, index) ) + val real_attr_name2 = resolveName( qs.getVar(varName)) + Some(s"R${ body_index }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") + } else { None } + } + } + val whereClauseStr = whereClause match { + case Nil => "" + case _ => s"""WHERE ${whereClause.mkString(" AND ")}""" + } + + s"""FROM ${ bodyNames } + ${ whereClauseStr }""" + } } // This is responsible for schema elements within a given query, e.g., @@ -277,66 +306,19 @@ object DeepDiveLogCompiler { type CompiledBlocks = List[String] - // This is generic code that generates the FROM with positional aliasing R0, R1, etc. - // and the corresponding WHERE clause (equating all variables) - def generateSQLBody(ss : CompilationState, z : ConjunctiveQuery) : String = { - val bodyNames = ( z.body.zipWithIndex map { case(x,i) => s"${x.name} R${i}"}).mkString(", ") - // Simple logic for the where clause, first find every first occurence of a - // and stick it in a map. 
- val qs = new QuerySchema(z) - - val whereClause = z.body.zipWithIndex flatMap { - case (Atom(relName, terms),body_index) => - terms flatMap { - case Variable(varName, relName, index) => - val canonical_body_index = qs.getBodyIndex(varName) - - if (canonical_body_index != body_index) { - val real_attr_name1 = ss.resolveName( Variable(varName, relName, index) ) - val real_attr_name2 = ss.resolveName( qs.getVar(varName)) - Some(s"R${ body_index }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") - } else { None } - } - } - val whereClauseStr = whereClause match { - case Nil => "" - case _ => s"""WHERE ${whereClause.mkString(" AND ")}""" - } - - s"""FROM ${ bodyNames } - ${ whereClauseStr }""" + def compileUserSettings(): CompiledBlocks = { + // TODO read user's proto-application.conf and augment it + List(""" + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} } - // generate the node portion (V) of the factor graph - def compileNodeRule(z: InferenceRule, qs: QuerySchema, ss: CompilationState, dep: List[(Int, String)]) : CompiledBlocks = { - val headTerms = z.q.head.terms map { - case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" - } - val index = qs.getBodyIndex(z.supervision) - val name = ss.resolveName(qs.getVar(z.supervision)) - val labelCol = s"R${index}.${name}" - val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") - val query = s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label - ${ generateSQLBody(ss,z.q) }""" - - val dependencyRelation = z.q.body map { case(x) => s"${x.name}"} - var dependencies = List[String]() - for (e <- dep) { - if (dependencyRelation contains e._2) - dependencies ::= s""" "extraction_rule_${e._1}" """ - } - val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" - - val ext = s""" - deepdive.extraction.extractors.extraction_rule_${z.q.head.name} { - sql: \"\"\" DROP TABLE IF EXISTS ${z.q.head.name}; - CREATE TABLE ${z.q.head.name} AS - ${query} - \"\"\" - style: "sql_extractor" - ${dependencyStr} - } - """ - List(ext) + """) } // generate variable schema statements @@ -372,7 +354,7 @@ object DeepDiveLogCompiler { val inputQuery = s""" SELECT ${selectStr} - ${ generateSQLBody(ss, r.q) }""" + ${ ss.generateSQLBody(r.q) }""" val dependencyRelation = r.q.body map { case(x) => s"${x.name}"} var dependencies = List[String]() @@ -395,7 +377,6 @@ object DeepDiveLogCompiler { } def compile(r: FunctionRule, index: Int, ss: CompilationState, dependencies: List[(Int, String)]): CompiledBlocks = { - val inputQuery = s""" SELECT * FROM ${r.input} """ @@ -426,13 +407,44 @@ object DeepDiveLogCompiler { List(extractor) } - // generate inference rule part for deepdive def compile(r: InferenceRule, i: Int, ss: CompilationState, dep: List[(Int, String)]): CompiledBlocks = { var blocks = List[String]() val qs = new QuerySchema( r.q ) // node query + // generate the node portion (V) of the factor graph + def compileNodeRule(z: InferenceRule, qs: QuerySchema, ss: CompilationState, dep: List[(Int, String)]) : CompiledBlocks = { + val headTerms = z.q.head.terms map { + case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" + } + val index = qs.getBodyIndex(z.supervision) + val name = ss.resolveName(qs.getVar(z.supervision)) + val labelCol = s"R${index}.${name}" + val headTermsStr = ( "0 as id" :: 
headTerms ).mkString(", ") + val query = s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label + ${ ss.generateSQLBody(z.q) }""" + + val dependencyRelation = z.q.body map { case(x) => s"${x.name}"} + var dependencies = List[String]() + for (e <- dep) { + if (dependencyRelation contains e._2) + dependencies ::= s""" "extraction_rule_${e._1}" """ + } + val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" + + val ext = s""" + deepdive.extraction.extractors.extraction_rule_${z.q.head.name} { + sql: \"\"\" DROP TABLE IF EXISTS ${z.q.head.name}; + CREATE TABLE ${z.q.head.name} AS + ${query} + \"\"\" + style: "sql_extractor" + ${dependencyStr} + } + """ + List(ext) + } if (ss.isQueryTerm(r.q.head.name)) blocks :::= compileNodeRule(r, qs, ss, dep) @@ -456,7 +468,7 @@ object DeepDiveLogCompiler { // factor input query val inputQuery = s""" SELECT ${selectStr} - ${ generateSQLBody(ss, fakeCQ) }""" + ${ ss.generateSQLBody(fakeCQ) }""" // factor function val func = s"""Imply(${r.q.head.name}.R0.label)""" @@ -480,54 +492,6 @@ object DeepDiveLogCompiler { blocks.reverse } - def dbSettings() : String = """ - deepdive.db.default { - driver: "org.postgresql.Driver" - url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} - user: ${PGUSER} - password: ${PGPASSWORD} - dbname: ${DBNAME} - host: ${PGHOST} - port: ${PGPORT} - } - """ - /* - T(base_attr); - S(a1,a2) - Q(x) :- S(x,y),T(y) - Should generate. - - Node query: - CREATE TABLE Q AS - SELECT 0 as _id, R0.a1 - FROM S as R0,T as R1 - WHERE R0.a2 = R1.base_attr - - Edge Query (if S and T are probabilistic) - SELECT Q._id, array_agg( (S._id, T_.id) ) - FROM Q as R0,S as R1,T as R2 - WHERE S.y = T.base_attr AND - Q.x = S.x AND Q.z = S.z - - Factor Function: OR - - ======= - R(x,y) (assume non probabilistic) - - Q(x) :- R(x,f) weight=f - - Node Query: - CREATE TABLE Q AS - SELECT DISTINCT 0 as _id, x FROM R - - Edge Query: - SELECT 0 as _fid, Q.id, R.f as w - FROM Q, R - WHERE Q.x = R.x - - ======= - - */ def main(args: Array[String]) { // get contents of all given files as one flat input program val inputProgram = parseArgs(args) @@ -545,6 +509,8 @@ object DeepDiveLogCompiler { // compile the program into blocks of application.conf val compiledBlocks = ( + compileUserSettings + ::: compileVariableSchema(parsedProgram.get, state) ::: ( @@ -564,7 +530,6 @@ object DeepDiveLogCompiler { ) // emit the generated code - println(dbSettings()) // TODO read user's proto-application.conf and augment it compiledBlocks foreach println } } From abc2f17aad53f9a1cbeff81c0ab19ed5fb9cd349 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 02:47:58 -0700 Subject: [PATCH 032/347] Moves dependencies to CompilationState.headNames --- DeepDiveLogCompiler.scala | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 151451198..7b08f4987 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -174,6 +174,8 @@ class CompilationState( statements : List[Statement] ) { var function_schema : Map[String, FunctionElement] = new HashMap[ String, FunctionElement]() + var headNames : List[(Int, String)] = List() + def init() = { // generate the statements. 
statements.foreach { @@ -188,6 +190,15 @@ class CompilationState( statements : List[Statement] ) { case FunctionElement(a, b, c, d, e) => function_schema += {a -> FunctionElement(a, b, c, d, e)} case FunctionRule(_,_,_) => () } + + // TODO analyze the real dependency (Map[headname, List[headname]]) here + headNames = statements.zipWithIndex flatMap { + case (e : ExtractionRule, i) => Some(i, e.q.head.name) + case (f : FunctionRule , i) => Some(i, f.output ) + case (w : InferenceRule , i) => Some(i, w.q.head.name) + case _ => None + } + } init() @@ -340,7 +351,7 @@ object DeepDiveLogCompiler { } // Generate extraction rule part for deepdive - def compile(r: ExtractionRule, index: Int, ss: CompilationState, em: List[(Int, String)]): CompiledBlocks = { + def compile(r: ExtractionRule, index: Int, ss: CompilationState): CompiledBlocks = { // Generate the body of the query. val qs = new QuerySchema( r.q ) // variable columns @@ -356,6 +367,7 @@ object DeepDiveLogCompiler { SELECT ${selectStr} ${ ss.generateSQLBody(r.q) }""" + val em = ss.headNames // TODO move this dependency analysis to a separate method val dependencyRelation = r.q.body map { case(x) => s"${x.name}"} var dependencies = List[String]() for (e <- em) { @@ -376,13 +388,14 @@ object DeepDiveLogCompiler { List(extractor) } - def compile(r: FunctionRule, index: Int, ss: CompilationState, dependencies: List[(Int, String)]): CompiledBlocks = { + def compile(r: FunctionRule, index: Int, ss: CompilationState): CompiledBlocks = { val inputQuery = s""" SELECT * FROM ${r.input} """ val function = ss.resolveFunctionName(r.function) + val dependencies = ss.headNames // TODO move this dependency analysis to a separate method // val dependencyRelation = r.q.body map { case(x) => s"${x.name}"} var dependency = List[String]() for (d <- dependencies) { @@ -408,13 +421,13 @@ object DeepDiveLogCompiler { } // generate inference rule part for deepdive - def compile(r: InferenceRule, i: Int, ss: CompilationState, dep: List[(Int, String)]): CompiledBlocks = { + def compile(r: InferenceRule, i: Int, ss: CompilationState): CompiledBlocks = { var blocks = List[String]() val qs = new QuerySchema( r.q ) // node query // generate the node portion (V) of the factor graph - def compileNodeRule(z: InferenceRule, qs: QuerySchema, ss: CompilationState, dep: List[(Int, String)]) : CompiledBlocks = { + def compileNodeRule(z: InferenceRule, qs: QuerySchema, ss: CompilationState) : CompiledBlocks = { val headTerms = z.q.head.terms map { case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" } @@ -425,6 +438,7 @@ object DeepDiveLogCompiler { val query = s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ ss.generateSQLBody(z.q) }""" + val dep = ss.headNames val dependencyRelation = z.q.body map { case(x) => s"${x.name}"} var dependencies = List[String]() for (e <- dep) { @@ -446,7 +460,7 @@ object DeepDiveLogCompiler { List(ext) } if (ss.isQueryTerm(r.q.head.name)) - blocks :::= compileNodeRule(r, qs, ss, dep) + blocks :::= compileNodeRule(r, qs, ss) // edge query val fakeBody = r.q.head +: r.q.body @@ -499,13 +513,6 @@ object DeepDiveLogCompiler { // take an initial pass to analyze the parsed program val state = new CompilationState( parsedProgram.get ) - // TODO analyze the real dependency (Map[headname, List[headname]]) here - var dependencies = parsedProgram.get.zipWithIndex map { - case (e : ExtractionRule, i) => (i, e.q.head.name) - case (f : FunctionRule, i) => (i, f.output) - case (w : InferenceRule, i) => (i, w.q.head.name) - case (_,_) 
=> (-1, "-1")
-    }

   // compile the program into blocks of application.conf
   val compiledBlocks = (
@@ -521,9 +528,9 @@ object DeepDiveLogCompiler {
     // methods to all case classes of Statement.
     // TODO move state, dependencies args into a composite field of type CompilationState
     // TODO get rid of zipWithIndex by keeping a counter in the CompilationState
-      case (s:InferenceRule , i:Int) => compile(s, i, state, dependencies)
-      case (s:ExtractionRule, i:Int) => compile(s, i, state, dependencies)
-      case (s:FunctionRule  , i:Int) => compile(s, i, state, dependencies)
+      case (s:InferenceRule , i:Int) => compile(s, i, state)
+      case (s:ExtractionRule, i:Int) => compile(s, i, state)
+      case (s:FunctionRule  , i:Int) => compile(s, i, state)
       case _ => List()
     }
   )

From 2cfabcc43c29849662e7753487928088d5b42544 Mon Sep 17 00:00:00 2001
From: Jaeho Shin
Date: Thu, 23 Apr 2015 03:04:53 -0700
Subject: [PATCH 033/347] Gets rid of the Int argument for compile() methods by
 defining an (extractor) block name resolving function in the
 CompilationState

---
 DeepDiveLogCompiler.scala | 32 ++++++++++++++++++++------------
 1 file changed, 20 insertions(+), 12 deletions(-)

diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala
index 7b08f4987..9571e9f5b 100644
--- a/DeepDiveLogCompiler.scala
+++ b/DeepDiveLogCompiler.scala
@@ -203,6 +203,13 @@ class CompilationState( statements : List[Statement] ) {

   init()

+  // Given a statement, resolve its name for the compiled extractor block.
+  def resolveExtractorBlockName(s: Statement): String = s match {
+    case s: ExtractionRule => s"extraction_rule_${statements indexOf s}"
+    case s: FunctionRule => s"extraction_rule_${statements indexOf s}"
+    case s: InferenceRule => s"extraction_rule_${s.q.head.name}"
+  }
+
   // Given a variable, resolve it. TODO: This should give a warning;
   // if we encounter a variable that is not in this map, then something
   // odd has happened.
@@ -351,7 +358,7 @@ object DeepDiveLogCompiler {
   }

   // Generate extraction rule part for deepdive
-  def compile(r: ExtractionRule, index: Int, ss: CompilationState): CompiledBlocks = {
+  def compile(r: ExtractionRule, ss: CompilationState): CompiledBlocks = {
     // Generate the body of the query.
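    // (A hedged sketch of what this function emits: for a rule at position N
    //  in the program, say  Q(x, y) :- R(x, y), S(y),  the compiled block
    //  comes out roughly as
    //    deepdive.extraction.extractors.extraction_rule_N {
    //      sql: """ DROP VIEW IF EXISTS Q; CREATE VIEW Q AS SELECT ... """
    //      style: "sql_extractor"
    //    }
    //  with extraction_rule_N coming from resolveExtractorBlockName above;
    //  Q, R, S and their columns are made-up example names.)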
val qs = new QuerySchema( r.q ) // variable columns @@ -376,8 +383,9 @@ object DeepDiveLogCompiler { } val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" + val blockName = ss.resolveExtractorBlockName(r) val extractor = s""" - deepdive.extraction.extractors.extraction_rule_${index} { + deepdive.extraction.extractors.${blockName} { sql: \"\"\" DROP VIEW IF EXISTS ${r.q.head.name}; CREATE VIEW ${r.q.head.name} AS ${inputQuery} \"\"\" @@ -388,7 +396,7 @@ object DeepDiveLogCompiler { List(extractor) } - def compile(r: FunctionRule, index: Int, ss: CompilationState): CompiledBlocks = { + def compile(r: FunctionRule, ss: CompilationState): CompiledBlocks = { val inputQuery = s""" SELECT * FROM ${r.input} """ @@ -407,8 +415,9 @@ object DeepDiveLogCompiler { + val blockName = ss.resolveExtractorBlockName(r) val extractor = s""" - deepdive.extraction.extractors.extraction_rule_${index} { + deepdive.extraction.extractors.${blockName} { input: \"\"\" SELECT * FROM ${r.input} \"\"\" output_relation: \"${r.output}\" @@ -421,7 +430,7 @@ object DeepDiveLogCompiler { } // generate inference rule part for deepdive - def compile(r: InferenceRule, i: Int, ss: CompilationState): CompiledBlocks = { + def compile(r: InferenceRule, ss: CompilationState): CompiledBlocks = { var blocks = List[String]() val qs = new QuerySchema( r.q ) @@ -447,8 +456,9 @@ object DeepDiveLogCompiler { } val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" + val blockName = ss.resolveExtractorBlockName(z) val ext = s""" - deepdive.extraction.extractors.extraction_rule_${z.q.head.name} { + deepdive.extraction.extractors.${blockName} { sql: \"\"\" DROP TABLE IF EXISTS ${z.q.head.name}; CREATE TABLE ${z.q.head.name} AS ${query} @@ -521,16 +531,14 @@ object DeepDiveLogCompiler { compileVariableSchema(parsedProgram.get, state) ::: ( - parsedProgram.get.zipWithIndex flatMap { + parsedProgram.get flatMap { // XXX Ideally, a single compile call should handle all the polymorphic // cases, but Scala/Java's ad-hoc polymorphism doesn't work that way. // Instead, we need to use the visitor pattern, adding compile(...) // methods to all case classes of Statement. - // TODO move state, dependencies args into a composite field of type CompilationState - // TODO get rid of zipWithIndex by keeping a counter in the CompilationState - case (s:InferenceRule , i:Int) => compile(s, i, state) - case (s:ExtractionRule, i:Int) => compile(s, i, state) - case (s:FunctionRule , i:Int) => compile(s, i, state) + case s:InferenceRule => compile(s, state) + case s:ExtractionRule => compile(s, state) + case s:FunctionRule => compile(s, state) case _ => List() } ) From 85015a907dcbe4c5a2351d508886e14b61cc5b67 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 03:34:14 -0700 Subject: [PATCH 034/347] Refactors dependencies generation --- DeepDiveLogCompiler.scala | 77 ++++++++++++++++++--------------------- examples/test6.expected | 2 +- 2 files changed, 36 insertions(+), 43 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 9571e9f5b..4feb3a1ce 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -174,7 +174,8 @@ class CompilationState( statements : List[Statement] ) { var function_schema : Map[String, FunctionElement] = new HashMap[ String, FunctionElement]() - var headNames : List[(Int, String)] = List() + // The dependency graph between statements. 
+ var dependencies : Map[Statement, Set[Statement]] = new HashMap() def init() = { // generate the statements. @@ -191,14 +192,7 @@ class CompilationState( statements : List[Statement] ) { case FunctionRule(_,_,_) => () } - // TODO analyze the real dependency (Map[headname, List[headname]]) here - headNames = statements.zipWithIndex flatMap { - case (e : ExtractionRule, i) => Some(i, e.q.head.name) - case (f : FunctionRule , i) => Some(i, f.output ) - case (w : InferenceRule , i) => Some(i, w.q.head.name) - case _ => None - } - + analyzeDependency(statements) } init() @@ -277,6 +271,35 @@ class CompilationState( statements : List[Statement] ) { s"""FROM ${ bodyNames } ${ whereClauseStr }""" } + + + // Analyze the dependency between statements and construct a graph. + def analyzeDependency(statements: List[Statement]) = { + // first map head names to the actual statement + var stmtByHeadName = new HashMap[String, Statement]() + statements foreach { + case e : ExtractionRule => stmtByHeadName += { e.q.head.name -> e } + case f : FunctionRule => stmtByHeadName += { f.output -> f } + case w : InferenceRule => stmtByHeadName += { w.q.head.name -> w } + case _ => + } + // then, look at the body of each statement to construct a dependency graph + statements foreach { + case f : FunctionRule => dependencies += { f -> ( Some(f.input) flatMap (stmtByHeadName get _)).toSet } + case e : ExtractionRule => dependencies += { e -> (e.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet } + case w : InferenceRule => dependencies += { w -> (w.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet } + case _ => + } + } + // Generates a "dependencies" value for a compiled block of given statement. + def generateDependenciesOfCompiledBlockFor(statement: Statement): String = { + val dependentExtractorBlockNames = + dependencies getOrElse (statement, Set()) map resolveExtractorBlockName + if (dependentExtractorBlockNames.size == 0) "" else { + val depStr = dependentExtractorBlockNames map {" \"" + _ + "\" "} mkString(", ") + s"dependencies: [${depStr}]" + } + } } // This is responsible for schema elements within a given query, e.g., @@ -374,15 +397,6 @@ object DeepDiveLogCompiler { SELECT ${selectStr} ${ ss.generateSQLBody(r.q) }""" - val em = ss.headNames // TODO move this dependency analysis to a separate method - val dependencyRelation = r.q.body map { case(x) => s"${x.name}"} - var dependencies = List[String]() - for (e <- em) { - if (dependencyRelation contains e._2) - dependencies ::= s""" "extraction_rule_${e._1}" """ - } - val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" - val blockName = ss.resolveExtractorBlockName(r) val extractor = s""" deepdive.extraction.extractors.${blockName} { @@ -390,7 +404,7 @@ object DeepDiveLogCompiler { CREATE VIEW ${r.q.head.name} AS ${inputQuery} \"\"\" style: "sql_extractor" - ${dependencyStr} + ${ss.generateDependenciesOfCompiledBlockFor(r)} } """ List(extractor) @@ -403,18 +417,6 @@ object DeepDiveLogCompiler { val function = ss.resolveFunctionName(r.function) - val dependencies = ss.headNames // TODO move this dependency analysis to a separate method - // val dependencyRelation = r.q.body map { case(x) => s"${x.name}"} - var dependency = List[String]() - for (d <- dependencies) { - if (r.input == d._2) { - dependency ::= s""" "extraction_rule_${d._1}" """ - } - } - val dependencyStr = if (dependency.length > 0) s"dependencies: [${dependency.mkString(", ")}]" else "" - - - val blockName = 
ss.resolveExtractorBlockName(r) val extractor = s""" deepdive.extraction.extractors.${blockName} { @@ -423,7 +425,7 @@ object DeepDiveLogCompiler { output_relation: \"${r.output}\" udf: \"${function.implementation}\" style: \"${function.mode}_extractor\" - ${dependencyStr} + ${ss.generateDependenciesOfCompiledBlockFor(r)} } """ List(extractor) @@ -447,15 +449,6 @@ object DeepDiveLogCompiler { val query = s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ ss.generateSQLBody(z.q) }""" - val dep = ss.headNames - val dependencyRelation = z.q.body map { case(x) => s"${x.name}"} - var dependencies = List[String]() - for (e <- dep) { - if (dependencyRelation contains e._2) - dependencies ::= s""" "extraction_rule_${e._1}" """ - } - val dependencyStr = if (dependencies.length > 0) s"dependencies: [${dependencies.mkString(", ")}]" else "" - val blockName = ss.resolveExtractorBlockName(z) val ext = s""" deepdive.extraction.extractors.${blockName} { @@ -464,7 +457,7 @@ object DeepDiveLogCompiler { ${query} \"\"\" style: "sql_extractor" - ${dependencyStr} + ${ss.generateDependenciesOfCompiledBlockFor(z)} } """ List(ext) diff --git a/examples/test6.expected b/examples/test6.expected index 984ad5e6c..cb5252ee9 100644 --- a/examples/test6.expected +++ b/examples/test6.expected @@ -89,7 +89,7 @@ WHERE R1.relation_id = R0.relation_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_14" , "extraction_rule_10" ] + dependencies: [ "extraction_rule_10" , "extraction_rule_14" ] } From 8d1491795f1ca4201848662fb483a95e47bcebde Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 03:40:13 -0700 Subject: [PATCH 035/347] Cleans up redundant .get --- DeepDiveLogCompiler.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 4feb3a1ce..ff1904a28 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -512,19 +512,19 @@ object DeepDiveLogCompiler { def main(args: Array[String]) { // get contents of all given files as one flat input program val inputProgram = parseArgs(args) - val parsedProgram = parseProgram(inputProgram) + val parsedProgram = parseProgram(inputProgram).get // take an initial pass to analyze the parsed program - val state = new CompilationState( parsedProgram.get ) + val state = new CompilationState( parsedProgram ) // compile the program into blocks of application.conf val compiledBlocks = ( compileUserSettings ::: - compileVariableSchema(parsedProgram.get, state) + compileVariableSchema(parsedProgram, state) ::: ( - parsedProgram.get flatMap { + parsedProgram flatMap { // XXX Ideally, a single compile call should handle all the polymorphic // cases, but Scala/Java's ad-hoc polymorphism doesn't work that way. // Instead, we need to use the visitor pattern, adding compile(...) From 0b7296834436e37a36cc7541d3c9ec9c2b4d75d8 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 03:57:39 -0700 Subject: [PATCH 036/347] Turns compile() functions into methods (Visitor pattern) --- DeepDiveLogCompiler.scala | 195 +++++++++++++++++++------------------- 1 file changed, 96 insertions(+), 99 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index ff1904a28..3f8ef00f9 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -57,7 +57,6 @@ Consider // *************************************** // * The union types for for the parser. 
* // *************************************** -trait Statement case class Variable(varName : String, relName : String, index : Int ) case class Atom(name : String, terms : List[Variable]) case class Attribute(name : String, terms : List[Variable], types : List[String]) @@ -74,13 +73,6 @@ case class KnownFactorWeight(value: Double) extends FactorWeight { case class UnknownFactorWeight(variables: List[String]) extends FactorWeight -case class SchemaElement( a : Attribute , query : Boolean ) extends Statement // atom and whether this is a query relation. -case class FunctionElement( functionName: String, input: String, output: String, implementation: String, mode: String) extends Statement -case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule -case class FunctionRule(input : String, output : String, function : String) extends Statement // Extraction rule -case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String) extends Statement // Weighted rule - - // Parser class ConjunctiveQueryParser extends JavaTokenParsers { // Odd definitions, but we'll keep them. @@ -331,62 +323,23 @@ class QuerySchema(q : ConjunctiveQuery) { } -// The compiler -object DeepDiveLogCompiler { - - def parseArgs(args: Array[String]) = { - val getContents = (filename: String) => { - val source = scala.io.Source.fromFile(filename) - try source.getLines mkString "\n" finally source.close() - } - args.map(getContents).reduce(_ ++ _) - } - - val parser = new ConjunctiveQueryParser - def parseProgram(inputProgram: String) = parser.parse(parser.statements, inputProgram) - - type CompiledBlocks = List[String] - - def compileUserSettings(): CompiledBlocks = { - // TODO read user's proto-application.conf and augment it - List(""" - deepdive.db.default { - driver: "org.postgresql.Driver" - url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} - user: ${PGUSER} - password: ${PGPASSWORD} - dbname: ${DBNAME} - host: ${PGHOST} - port: ${PGPORT} - } - """) - } - - // generate variable schema statements - def compileVariableSchema(statements: List[Statement], ss: CompilationState): CompiledBlocks = { - var schema = Set[String]() - // generate the statements. - statements.foreach { - case InferenceRule(q, weights, supervision) => - val qs = new QuerySchema(q) - schema += s"${q.head.name}.label: Boolean" - case _ => () - } - val ddSchema = s""" - deepdive.schema.variables { - ${schema.mkString("\n")} - } - """ - List(ddSchema) - } +// Statements that will be parsed and compiled +trait Statement { + type CompiledBlocks = DeepDiveLogCompiler.CompiledBlocks + def compile(state: CompilationState): CompiledBlocks = List() +} +case class SchemaElement( a : Attribute , query : Boolean ) extends Statement // atom and whether this is a query relation. +case class FunctionElement( functionName: String, input: String, output: String, implementation: String, mode: String) extends Statement +case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule +{ // Generate extraction rule part for deepdive - def compile(r: ExtractionRule, ss: CompilationState): CompiledBlocks = { + override def compile(ss: CompilationState): CompiledBlocks = { // Generate the body of the query. 
- val qs = new QuerySchema( r.q ) + val qs = new QuerySchema( q ) // variable columns - val variableCols = r.q.head.terms flatMap { - case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, r.q, true) + val variableCols = q.head.terms flatMap { + case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, q, true) } val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None @@ -395,46 +348,50 @@ object DeepDiveLogCompiler { val inputQuery = s""" SELECT ${selectStr} - ${ ss.generateSQLBody(r.q) }""" + ${ ss.generateSQLBody(q) }""" - val blockName = ss.resolveExtractorBlockName(r) + val blockName = ss.resolveExtractorBlockName(this) val extractor = s""" deepdive.extraction.extractors.${blockName} { - sql: \"\"\" DROP VIEW IF EXISTS ${r.q.head.name}; - CREATE VIEW ${r.q.head.name} AS ${inputQuery} + sql: \"\"\" DROP VIEW IF EXISTS ${q.head.name}; + CREATE VIEW ${q.head.name} AS ${inputQuery} \"\"\" style: "sql_extractor" - ${ss.generateDependenciesOfCompiledBlockFor(r)} + ${ss.generateDependenciesOfCompiledBlockFor(this)} } """ List(extractor) } - - def compile(r: FunctionRule, ss: CompilationState): CompiledBlocks = { +} +case class FunctionRule(input : String, output : String, function : String) extends Statement // Extraction rule +{ + override def compile(ss: CompilationState): CompiledBlocks = { val inputQuery = s""" - SELECT * FROM ${r.input} + SELECT * FROM ${input} """ - val function = ss.resolveFunctionName(r.function) + val function = ss.resolveFunctionName(this.function) - val blockName = ss.resolveExtractorBlockName(r) + val blockName = ss.resolveExtractorBlockName(this) val extractor = s""" deepdive.extraction.extractors.${blockName} { - input: \"\"\" SELECT * FROM ${r.input} + input: \"\"\" SELECT * FROM ${input} \"\"\" - output_relation: \"${r.output}\" + output_relation: \"${output}\" udf: \"${function.implementation}\" style: \"${function.mode}_extractor\" - ${ss.generateDependenciesOfCompiledBlockFor(r)} + ${ss.generateDependenciesOfCompiledBlockFor(this)} } """ List(extractor) } - +} +case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String) extends Statement // Weighted rule +{ // generate inference rule part for deepdive - def compile(r: InferenceRule, ss: CompilationState): CompiledBlocks = { + override def compile(ss: CompilationState): CompiledBlocks = { var blocks = List[String]() - val qs = new QuerySchema( r.q ) + val qs = new QuerySchema( q ) // node query // generate the node portion (V) of the factor graph @@ -462,20 +419,20 @@ object DeepDiveLogCompiler { """ List(ext) } - if (ss.isQueryTerm(r.q.head.name)) - blocks :::= compileNodeRule(r, qs, ss) + if (ss.isQueryTerm(q.head.name)) + blocks :::= compileNodeRule(this, qs, ss) // edge query - val fakeBody = r.q.head +: r.q.body - val fakeCQ = ConjunctiveQuery(r.q.head, fakeBody) // we will just use the fakeBody below. + val fakeBody = q.head +: q.body + val fakeCQ = ConjunctiveQuery(q.head, fakeBody) // we will just use the fakeBody below. 
- val index = r.q.body.length + 1 + val index = q.body.length + 1 val qs2 = new QuerySchema( fakeCQ ) - val variableIdsStr = Some(s"""R0.id AS "${r.q.head.name}.R0.id" """) - val variableColsStr = Some(s"""R0.label AS "${r.q.head.name}.R0.label" """) + val variableIdsStr = Some(s"""R0.id AS "${q.head.name}.R0.id" """) + val variableColsStr = Some(s"""R0.label AS "${q.head.name}.R0.label" """) // weight string - val uwStr = r.weights match { + val uwStr = weights match { case KnownFactorWeight(x) => None case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, true)).mkString(", ")) } @@ -488,10 +445,10 @@ object DeepDiveLogCompiler { ${ ss.generateSQLBody(fakeCQ) }""" // factor function - val func = s"""Imply(${r.q.head.name}.R0.label)""" + val func = s"""Imply(${q.head.name}.R0.label)""" // weight - val weight = r.weights match { + val weight = weights match { case KnownFactorWeight(x) => s"${x}" case UnknownFactorWeight(w) => { s"""?(${w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, false)).mkString(", ")})""" @@ -499,7 +456,7 @@ object DeepDiveLogCompiler { } blocks ::= s""" - deepdive.inference.factors.factor_${r.q.head.name} { + deepdive.inference.factors.factor_${q.head.name} { input_query: \"\"\"${inputQuery}\"\"\" function: "${func}" weight: "${weight}" @@ -508,6 +465,57 @@ object DeepDiveLogCompiler { blocks.reverse } +} + + +// Compiler object that wires up everything together +object DeepDiveLogCompiler { + + def parseArgs(args: Array[String]) = { + val getContents = (filename: String) => { + val source = scala.io.Source.fromFile(filename) + try source.getLines mkString "\n" finally source.close() + } + args.map(getContents).reduce(_ ++ _) + } + + val parser = new ConjunctiveQueryParser + def parseProgram(inputProgram: String) = parser.parse(parser.statements, inputProgram) + + type CompiledBlocks = List[String] + + def compileUserSettings(): CompiledBlocks = { + // TODO read user's proto-application.conf and augment it + List(""" + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + } + """) + } + + // generate variable schema statements + def compileVariableSchema(statements: List[Statement], ss: CompilationState): CompiledBlocks = { + var schema = Set[String]() + // generate the statements. + statements.foreach { + case InferenceRule(q, weights, supervision) => + val qs = new QuerySchema(q) + schema += s"${q.head.name}.label: Boolean" + case _ => () + } + val ddSchema = s""" + deepdive.schema.variables { + ${schema.mkString("\n")} + } + """ + List(ddSchema) + } def main(args: Array[String]) { // get contents of all given files as one flat input program @@ -523,18 +531,7 @@ object DeepDiveLogCompiler { ::: compileVariableSchema(parsedProgram, state) ::: - ( - parsedProgram flatMap { - // XXX Ideally, a single compile call should handle all the polymorphic - // cases, but Scala/Java's ad-hoc polymorphism doesn't work that way. - // Instead, we need to use the visitor pattern, adding compile(...) - // methods to all case classes of Statement. 
- case s:InferenceRule => compile(s, state) - case s:ExtractionRule => compile(s, state) - case s:FunctionRule => compile(s, state) - case _ => List() - } - ) + (parsedProgram flatMap (_.compile(state))) ) // emit the generated code From b20236e143fcc780642764caee151771d6721ab4 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 04:09:14 -0700 Subject: [PATCH 037/347] minor cleanup --- DeepDiveLogCompiler.scala | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 3f8ef00f9..cede0c360 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -1,5 +1,7 @@ -import scala.util.parsing.combinator._ -import scala.collection.immutable.HashMap +// DeepDiveLog compiler +// See: https://docs.google.com/document/d/1SBIvvki3mnR28Mf0Pkin9w9mWNam5AA0SpIGj1ZN2c4 + +// TODO update the following comment to new syntax. /* This file parses an extended form of datalog like sugar. @@ -54,6 +56,9 @@ Consider unordered attributes, etc.). */ +import scala.util.parsing.combinator._ +import scala.collection.immutable.HashMap + // *************************************** // * The union types for for the parser. * // *************************************** @@ -148,14 +153,14 @@ class ConjunctiveQueryParser extends JavaTokenParsers { // rules or schema elements in aribitrary order def statement : Parser[Statement] = (functionElement | inferenceRule | extractionRule | functionRule | schemaElement) ^^ {case(x) => x} - type Program = List[Statement] - def statements : Parser[Program] = rep1sep(statement, ".") ^^ { case(x) => x } + def program : Parser[List[Statement]] = rep1sep(statement, ".") ^^ { case(x) => x } } + // This handles the schema statements. // It can tell you if a predicate is a "query" predicate or a "ground prediate" // and it resolves Variables their correct and true name in the schema, i.e. R(x,y) then x could be Attribute1 declared. -class CompilationState( statements : List[Statement] ) { +class CompilationState( statements : DeepDiveLogCompiler.Program ) { // TODO: refactor the schema into a class that constructs and // manages these maps. Also it should have appropriate // abstractions and error handling for missing values. @@ -325,6 +330,8 @@ class QuerySchema(q : ConjunctiveQuery) { // Statements that will be parsed and compiled +// Statement-local compilation logic is kept in each case class' compile() method. +// Any global compilation logic should be kept in DeepDiveLogCompiler object. 
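+// With compile() as a method, compiling a whole program reduces to a single
+// polymorphic dispatch; a sketch, assuming a parsed `program: List[Statement]`
+// and its CompilationState `state`, as main() does below:
+//   val blocks = program flatMap (_.compile(state))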
trait Statement { type CompiledBlocks = DeepDiveLogCompiler.CompiledBlocks def compile(state: CompilationState): CompiledBlocks = List() @@ -471,6 +478,10 @@ case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervisi // Compiler object that wires up everything together object DeepDiveLogCompiler { + type Program = List[Statement] + type CompiledBlock = String + type CompiledBlocks = List[CompiledBlock] + def parseArgs(args: Array[String]) = { val getContents = (filename: String) => { val source = scala.io.Source.fromFile(filename) @@ -480,9 +491,9 @@ object DeepDiveLogCompiler { } val parser = new ConjunctiveQueryParser - def parseProgram(inputProgram: String) = parser.parse(parser.statements, inputProgram) - - type CompiledBlocks = List[String] + def parseProgram(inputProgram: String) = { + parser.parse(parser.program, inputProgram) + } def compileUserSettings(): CompiledBlocks = { // TODO read user's proto-application.conf and augment it @@ -500,7 +511,7 @@ object DeepDiveLogCompiler { } // generate variable schema statements - def compileVariableSchema(statements: List[Statement], ss: CompilationState): CompiledBlocks = { + def compileVariableSchema(statements: Program, ss: CompilationState): CompiledBlocks = { var schema = Set[String]() // generate the statements. statements.foreach { @@ -517,6 +528,7 @@ object DeepDiveLogCompiler { List(ddSchema) } + // entry point for command-line interface def main(args: Array[String]) { // get contents of all given files as one flat input program val inputProgram = parseArgs(args) @@ -526,7 +538,7 @@ object DeepDiveLogCompiler { val state = new CompilationState( parsedProgram ) // compile the program into blocks of application.conf - val compiledBlocks = ( + val blocks = ( compileUserSettings ::: compileVariableSchema(parsedProgram, state) @@ -535,6 +547,6 @@ object DeepDiveLogCompiler { ) // emit the generated code - compiledBlocks foreach println + blocks foreach println } } From 116b210ea9c97d6d51cb374063b5ebcb240deba1 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 04:12:58 -0700 Subject: [PATCH 038/347] Separates parser defining syntax from the compiler --- DeepDiveLogCompiler.scala | 101 -------------------------------------- DeepDiveLogParser.scala | 100 +++++++++++++++++++++++++++++++++++++ 2 files changed, 100 insertions(+), 101 deletions(-) create mode 100644 DeepDiveLogParser.scala diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index cede0c360..dd0f44a8b 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -56,107 +56,8 @@ Consider unordered attributes, etc.). */ -import scala.util.parsing.combinator._ import scala.collection.immutable.HashMap -// *************************************** -// * The union types for for the parser. * -// *************************************** -case class Variable(varName : String, relName : String, index : Int ) -case class Atom(name : String, terms : List[Variable]) -case class Attribute(name : String, terms : List[Variable], types : List[String]) -case class ConjunctiveQuery(head: Atom, body: List[Atom]) -case class Column(name : String, t : String) - -sealed trait FactorWeight { - def variables : List[String] -} - -case class KnownFactorWeight(value: Double) extends FactorWeight { - def variables = Nil -} - -case class UnknownFactorWeight(variables: List[String]) extends FactorWeight - -// Parser -class ConjunctiveQueryParser extends JavaTokenParsers { - // Odd definitions, but we'll keep them. 
- // def stringliteral1: Parser[String] = ("'"+"""([^'\p{Cntrl}\\]|\\[\\"'bfnrt]|\\u[a-fA-F0-9]{4})*"""+"'").r ^^ {case (x) => x} - // def stringliteral2: Parser[String] = """[a-zA-Z_0-9\./]*""".r ^^ {case (x) => x} - // def stringliteral: Parser[String] = (stringliteral1 | stringliteral2) ^^ {case (x) => x} - def stringliteral: Parser[String] = """[a-zA-Z0-9_\[\]]+""".r - def path: Parser[String] = """[a-zA-Z0-9\./_]+""".r - - // relation names and columns are just strings. - def relation_name: Parser[String] = stringliteral ^^ {case (x) => x} - def col : Parser[String] = stringliteral ^^ { case(x) => x } - def attr : Parser[Column] = stringliteral ~ stringliteral ^^ { - case(x ~ y) => Column(x, y) - } - - def atom: Parser[Atom] = relation_name ~ "(" ~ rep1sep(col, ",") ~ ")" ^^ { - case (r ~ "(" ~ cols ~ ")") => { - val vars = cols.zipWithIndex map { case(name,i) => Variable(name, r, i) } - Atom(r,vars) - } - } - - def attribute: Parser[Attribute] = relation_name ~ "(" ~ rep1sep(attr, ",") ~ ")" ^^ { - case (r ~ "(" ~ attrs ~ ")") => { - val vars = attrs.zipWithIndex map { case(x, i) => Variable(x.name, r, i) } - var types = attrs map { case(x) => x.t } - Attribute(r,vars, types) - } - } - - def udf : Parser[String] = stringliteral ^^ {case (x) => x} - - def query : Parser[ConjunctiveQuery] = atom ~ ":-" ~ rep1sep(atom, ",") ^^ { - case (headatom ~ ":-" ~ bodyatoms) => ConjunctiveQuery(headatom, bodyatoms.toList) - } - - def schemaElement : Parser[SchemaElement] = attribute ~ opt("?") ^^ { - case (a ~ None) => SchemaElement(a,true) - case (a ~ Some(_)) => SchemaElement(a,false) - } - - - def functionElement : Parser[FunctionElement] = "function" ~ stringliteral ~ - "over like" ~ stringliteral ~ "returns like" ~ stringliteral ~ "implementation" ~ - "\"" ~ path ~ "\"" ~ "handles" ~ stringliteral ~ "lines" ^^ { - case ("function" ~ a ~ "over like" ~ b ~ "returns like" ~ c ~ "implementation" ~ - "\"" ~ d ~ "\"" ~ "handles" ~ e ~ "lines") => FunctionElement(a, b, c, d, e) - } - - def extractionRule : Parser[ExtractionRule] = query ^^ { - case (q) => ExtractionRule(q) - // case (q ~ "udf" ~ "=" ~ None) => ExtractionRule(q,None) - } - - def functionRule : Parser[FunctionRule] = stringliteral ~ ":-" ~ "!" ~ stringliteral ~ "(" ~ stringliteral ~ ")" ^^ { - case (a ~ ":-" ~ "!" ~ b ~ "(" ~ c ~ ")") => FunctionRule(c, a, b) - } - - def constantWeight = "weight" ~> "=" ~> """-?[\d\.]+""".r ^^ { x => KnownFactorWeight(x.toDouble) } - def unknwonWeight = "weight" ~> "=" ~> opt(rep1sep(col, ",")) ^^ { - case Some(varList) => UnknownFactorWeight(varList.toList) - case _ => UnknownFactorWeight(List()) - } - def factorWeight = constantWeight | unknwonWeight - - def supervision = "label" ~> "=" ~> col - - def inferenceRule : Parser[InferenceRule] = query ~ factorWeight ~ supervision ^^ { - case (q ~ weight ~ supervision) => InferenceRule(q, weight, supervision) - } - - // rules or schema elements in aribitrary order - def statement : Parser[Statement] = (functionElement | inferenceRule | extractionRule | functionRule | schemaElement) ^^ {case(x) => x} - - def program : Parser[List[Statement]] = rep1sep(statement, ".") ^^ { case(x) => x } -} - - // This handles the schema statements. // It can tell you if a predicate is a "query" predicate or a "ground prediate" // and it resolves Variables their correct and true name in the schema, i.e. R(x,y) then x could be Attribute1 declared. 
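 // For example, with a declared schema  R(x text, f text),  the variable bound
 // at position 1 of a body atom R(a, b) resolves to the column name "f"; the
 // lookup below amounts to (a sketch):
 //   schema getOrElse ((relName, index), varName)  // fall back to the variable's own name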
@@ -325,7 +226,6 @@ class QuerySchema(q : ConjunctiveQuery) { // accessors def getBodyIndex( varName : String ) : Int = { query_schema(varName)._1 } def getVar(varName : String ) : Variable = { query_schema(varName)._2 } - } @@ -477,7 +377,6 @@ case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervisi // Compiler object that wires up everything together object DeepDiveLogCompiler { - type Program = List[Statement] type CompiledBlock = String type CompiledBlocks = List[CompiledBlock] diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala new file mode 100644 index 000000000..a8d788a8c --- /dev/null +++ b/DeepDiveLogParser.scala @@ -0,0 +1,100 @@ +// DeepDiveLog syntax + +import scala.util.parsing.combinator._ + +// *************************************** +// * The union types for for the parser. * +// *************************************** +case class Variable(varName : String, relName : String, index : Int ) +case class Atom(name : String, terms : List[Variable]) +case class Attribute(name : String, terms : List[Variable], types : List[String]) +case class ConjunctiveQuery(head: Atom, body: List[Atom]) +case class Column(name : String, t : String) + +sealed trait FactorWeight { + def variables : List[String] +} + +case class KnownFactorWeight(value: Double) extends FactorWeight { + def variables = Nil +} + +case class UnknownFactorWeight(variables: List[String]) extends FactorWeight + +// Parser +class ConjunctiveQueryParser extends JavaTokenParsers { + // Odd definitions, but we'll keep them. + // def stringliteral1: Parser[String] = ("'"+"""([^'\p{Cntrl}\\]|\\[\\"'bfnrt]|\\u[a-fA-F0-9]{4})*"""+"'").r ^^ {case (x) => x} + // def stringliteral2: Parser[String] = """[a-zA-Z_0-9\./]*""".r ^^ {case (x) => x} + // def stringliteral: Parser[String] = (stringliteral1 | stringliteral2) ^^ {case (x) => x} + def stringliteral: Parser[String] = """[a-zA-Z0-9_\[\]]+""".r + def path: Parser[String] = """[a-zA-Z0-9\./_]+""".r + + // relation names and columns are just strings. 
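+  // e.g., rep1sep(col, ",") below accepts one or more comma-separated column
+  // names, so `atom` parses an input like  R(x,y)  into Atom("R", ...) with
+  // one indexed Variable per column position.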
+ def relation_name: Parser[String] = stringliteral ^^ {case (x) => x} + def col : Parser[String] = stringliteral ^^ { case(x) => x } + def attr : Parser[Column] = stringliteral ~ stringliteral ^^ { + case(x ~ y) => Column(x, y) + } + + def atom: Parser[Atom] = relation_name ~ "(" ~ rep1sep(col, ",") ~ ")" ^^ { + case (r ~ "(" ~ cols ~ ")") => { + val vars = cols.zipWithIndex map { case(name,i) => Variable(name, r, i) } + Atom(r,vars) + } + } + + def attribute: Parser[Attribute] = relation_name ~ "(" ~ rep1sep(attr, ",") ~ ")" ^^ { + case (r ~ "(" ~ attrs ~ ")") => { + val vars = attrs.zipWithIndex map { case(x, i) => Variable(x.name, r, i) } + var types = attrs map { case(x) => x.t } + Attribute(r,vars, types) + } + } + + def udf : Parser[String] = stringliteral ^^ {case (x) => x} + + def query : Parser[ConjunctiveQuery] = atom ~ ":-" ~ rep1sep(atom, ",") ^^ { + case (headatom ~ ":-" ~ bodyatoms) => ConjunctiveQuery(headatom, bodyatoms.toList) + } + + def schemaElement : Parser[SchemaElement] = attribute ~ opt("?") ^^ { + case (a ~ None) => SchemaElement(a,true) + case (a ~ Some(_)) => SchemaElement(a,false) + } + + + def functionElement : Parser[FunctionElement] = "function" ~ stringliteral ~ + "over like" ~ stringliteral ~ "returns like" ~ stringliteral ~ "implementation" ~ + "\"" ~ path ~ "\"" ~ "handles" ~ stringliteral ~ "lines" ^^ { + case ("function" ~ a ~ "over like" ~ b ~ "returns like" ~ c ~ "implementation" ~ + "\"" ~ d ~ "\"" ~ "handles" ~ e ~ "lines") => FunctionElement(a, b, c, d, e) + } + + def extractionRule : Parser[ExtractionRule] = query ^^ { + case (q) => ExtractionRule(q) + // case (q ~ "udf" ~ "=" ~ None) => ExtractionRule(q,None) + } + + def functionRule : Parser[FunctionRule] = stringliteral ~ ":-" ~ "!" ~ stringliteral ~ "(" ~ stringliteral ~ ")" ^^ { + case (a ~ ":-" ~ "!" 
~ b ~ "(" ~ c ~ ")") => FunctionRule(c, a, b) + } + + def constantWeight = "weight" ~> "=" ~> """-?[\d\.]+""".r ^^ { x => KnownFactorWeight(x.toDouble) } + def unknwonWeight = "weight" ~> "=" ~> opt(rep1sep(col, ",")) ^^ { + case Some(varList) => UnknownFactorWeight(varList.toList) + case _ => UnknownFactorWeight(List()) + } + def factorWeight = constantWeight | unknwonWeight + + def supervision = "label" ~> "=" ~> col + + def inferenceRule : Parser[InferenceRule] = query ~ factorWeight ~ supervision ^^ { + case (q ~ weight ~ supervision) => InferenceRule(q, weight, supervision) + } + + // rules or schema elements in aribitrary order + def statement : Parser[Statement] = (functionElement | inferenceRule | extractionRule | functionRule | schemaElement) ^^ {case(x) => x} + + def program : Parser[List[Statement]] = rep1sep(statement, ".") ^^ { case(x) => x } +} From 8094421ae97d160a8eff686880de5bbd665f6d80 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 23:09:06 -0700 Subject: [PATCH 039/347] Prints parse error --- DeepDiveLogCompiler.scala | 24 +++++++++++------------- DeepDiveLogParser.scala | 13 +++++++++++++ 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index dd0f44a8b..a8e0a8639 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -381,17 +381,16 @@ object DeepDiveLogCompiler { type CompiledBlock = String type CompiledBlocks = List[CompiledBlock] - def parseArgs(args: Array[String]) = { - val getContents = (filename: String) => { - val source = scala.io.Source.fromFile(filename) - try source.getLines mkString "\n" finally source.close() - } - args.map(getContents).reduce(_ ++ _) - } - val parser = new ConjunctiveQueryParser - def parseProgram(inputProgram: String) = { - parser.parse(parser.program, inputProgram) + def parseFilesOrExit(fileNames: Array[String]): Program = { + try { + fileNames.toList flatMap { parser.parseProgramFile(_) } + } catch { + case e: RuntimeException => + System.err.println("[error] " + e.getMessage) + System.exit(1) + null + } } def compileUserSettings(): CompiledBlocks = { @@ -429,9 +428,8 @@ object DeepDiveLogCompiler { // entry point for command-line interface def main(args: Array[String]) { - // get contents of all given files as one flat input program - val inputProgram = parseArgs(args) - val parsedProgram = parseProgram(inputProgram).get + // parse each file into a single program + val parsedProgram = parseFilesOrExit(args) // take an initial pass to analyze the parsed program val state = new CompilationState( parsedProgram ) diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala index a8d788a8c..6f7bcc9a5 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -97,4 +97,17 @@ class ConjunctiveQueryParser extends JavaTokenParsers { def statement : Parser[Statement] = (functionElement | inferenceRule | extractionRule | functionRule | schemaElement) ^^ {case(x) => x} def program : Parser[List[Statement]] = rep1sep(statement, ".") ^^ { case(x) => x } + + def parseProgram(inputProgram: CharSequence, fileName: Option[String] = None): List[Statement] = { + parse(program, inputProgram) match { + case result: Success[_] => result.get + case error: NoSuccess => throw new RuntimeException(fileName.getOrElse("") + error.toString()) + } + } + + def parseProgramFile(fileName: String): List[Statement] = { + val source = scala.io.Source.fromFile(fileName) + try parseProgram(source.getLines mkString "\n", Some(fileName)) + finally 
source.close() + } } From 0035ce96d709a944599cf4c6a6ad260bbae1cb39 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 04:43:52 -0700 Subject: [PATCH 040/347] Formats the grammar to appear prettier (mostly only white space changes) --- DeepDiveLogParser.scala | 120 ++++++++++++++++++++++++---------------- 1 file changed, 71 insertions(+), 49 deletions(-) diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala index 6f7bcc9a5..00478f4ce 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -31,72 +31,94 @@ class ConjunctiveQueryParser extends JavaTokenParsers { def path: Parser[String] = """[a-zA-Z0-9\./_]+""".r // relation names and columns are just strings. - def relation_name: Parser[String] = stringliteral ^^ {case (x) => x} - def col : Parser[String] = stringliteral ^^ { case(x) => x } - def attr : Parser[Column] = stringliteral ~ stringliteral ^^ { - case(x ~ y) => Column(x, y) - } - - def atom: Parser[Atom] = relation_name ~ "(" ~ rep1sep(col, ",") ~ ")" ^^ { - case (r ~ "(" ~ cols ~ ")") => { - val vars = cols.zipWithIndex map { case(name,i) => Variable(name, r, i) } - Atom(r,vars) + def relation_name: Parser[String] = stringliteral + def col : Parser[String] = stringliteral + def attr : Parser[Column] = + stringliteral ~ stringliteral ^^ { + case(x ~ y) => Column(x, y) } - } - def attribute: Parser[Attribute] = relation_name ~ "(" ~ rep1sep(attr, ",") ~ ")" ^^ { - case (r ~ "(" ~ attrs ~ ")") => { - val vars = attrs.zipWithIndex map { case(x, i) => Variable(x.name, r, i) } - var types = attrs map { case(x) => x.t } - Attribute(r,vars, types) + def atom: Parser[Atom] = + relation_name ~ "(" ~ rep1sep(col, ",") ~ ")" ^^ { + case (r ~ "(" ~ cols ~ ")") => + Atom(r, cols.zipWithIndex map { case(name,i) => Variable(name, r, i) }) } - } - def udf : Parser[String] = stringliteral ^^ {case (x) => x} + def attribute: Parser[Attribute] = + relation_name ~ "(" ~ rep1sep(attr, ",") ~ ")" ^^ { + case (r ~ "(" ~ attrs ~ ")") => { + val vars = attrs.zipWithIndex map { case(x, i) => Variable(x.name, r, i) } + var types = attrs map { case(x) => x.t } + Attribute(r, vars, types) + } + } - def query : Parser[ConjunctiveQuery] = atom ~ ":-" ~ rep1sep(atom, ",") ^^ { - case (headatom ~ ":-" ~ bodyatoms) => ConjunctiveQuery(headatom, bodyatoms.toList) - } + def query : Parser[ConjunctiveQuery] = + atom ~ ":-" ~ rep1sep(atom, ",") ^^ { + case (headatom ~ ":-" ~ bodyatoms) => + ConjunctiveQuery(headatom, bodyatoms.toList) + } - def schemaElement : Parser[SchemaElement] = attribute ~ opt("?") ^^ { - case (a ~ None) => SchemaElement(a,true) - case (a ~ Some(_)) => SchemaElement(a,false) - } + def schemaElement : Parser[SchemaElement] = + attribute ~ opt("?") ^^ { + case (a ~ None ) => SchemaElement(a,true) + case (a ~ Some(_)) => SchemaElement(a,false) + } - def functionElement : Parser[FunctionElement] = "function" ~ stringliteral ~ - "over like" ~ stringliteral ~ "returns like" ~ stringliteral ~ "implementation" ~ - "\"" ~ path ~ "\"" ~ "handles" ~ stringliteral ~ "lines" ^^ { - case ("function" ~ a ~ "over like" ~ b ~ "returns like" ~ c ~ "implementation" ~ - "\"" ~ d ~ "\"" ~ "handles" ~ e ~ "lines") => FunctionElement(a, b, c, d, e) - } + def functionElement : Parser[FunctionElement] = + ( "function" ~ stringliteral + ~ "over" ~ "like" ~ stringliteral + ~ "returns" ~ "like" ~ stringliteral + ~ "implementation" ~ "\"" ~ path ~ "\"" ~ "handles" ~ stringliteral ~ "lines" + ) ^^ { + case ("function" ~ a + ~ "over" ~ "like" ~ b + ~ "returns" ~ "like" ~ c + ~ 
"implementation" ~ "\"" ~ d ~ "\"" ~ "handles" ~ e ~ "lines") => + FunctionElement(a, b, c, d, e) + } - def extractionRule : Parser[ExtractionRule] = query ^^ { - case (q) => ExtractionRule(q) - // case (q ~ "udf" ~ "=" ~ None) => ExtractionRule(q,None) - } + def extractionRule : Parser[ExtractionRule] = + query ^^ { + ExtractionRule(_) + } - def functionRule : Parser[FunctionRule] = stringliteral ~ ":-" ~ "!" ~ stringliteral ~ "(" ~ stringliteral ~ ")" ^^ { - case (a ~ ":-" ~ "!" ~ b ~ "(" ~ c ~ ")") => FunctionRule(c, a, b) - } + def functionRule : Parser[FunctionRule] = + ( stringliteral ~ ":-" ~ "!" + ~ stringliteral ~ "(" ~ stringliteral ~ ")" + ) ^^ { + case (out ~ ":-" ~ "!" ~ fn ~ "(" ~ in ~ ")") => + FunctionRule(in, out, fn) + } - def constantWeight = "weight" ~> "=" ~> """-?[\d\.]+""".r ^^ { x => KnownFactorWeight(x.toDouble) } - def unknwonWeight = "weight" ~> "=" ~> opt(rep1sep(col, ",")) ^^ { - case Some(varList) => UnknownFactorWeight(varList.toList) - case _ => UnknownFactorWeight(List()) - } + def constantWeight = + "weight" ~> "=" ~> """-?[\d\.]+""".r ^^ { + x => KnownFactorWeight(x.toDouble) + } + def unknwonWeight = + "weight" ~> "=" ~> opt(rep1sep(col, ",")) ^^ { + case Some(varList) => UnknownFactorWeight(varList.toList) + case _ => UnknownFactorWeight(List()) + } def factorWeight = constantWeight | unknwonWeight def supervision = "label" ~> "=" ~> col - def inferenceRule : Parser[InferenceRule] = query ~ factorWeight ~ supervision ^^ { - case (q ~ weight ~ supervision) => InferenceRule(q, weight, supervision) - } + def inferenceRule : Parser[InferenceRule] = + ( query ~ factorWeight ~ supervision + ) ^^ { + case (q ~ weight ~ supervision) => InferenceRule(q, weight, supervision) + } // rules or schema elements in aribitrary order - def statement : Parser[Statement] = (functionElement | inferenceRule | extractionRule | functionRule | schemaElement) ^^ {case(x) => x} - - def program : Parser[List[Statement]] = rep1sep(statement, ".") ^^ { case(x) => x } + def statement : Parser[Statement] = ( functionElement + | inferenceRule + | extractionRule + | functionRule + | schemaElement + ) + def program : Parser[List[Statement]] = rep1sep(statement, ".") def parseProgram(inputProgram: CharSequence, fileName: Option[String] = None): List[Statement] = { parse(program, inputProgram) match { From 4e367815f6267a61bf75fc33631a12b172e8ff94 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 05:02:46 -0700 Subject: [PATCH 041/347] Renames some misleading grammar variables e.g., stringliteral, attr, col --- DeepDiveLogParser.scala | 52 ++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala index 00478f4ce..3ba2300a4 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -1,4 +1,5 @@ // DeepDiveLog syntax +// See: https://docs.google.com/document/d/1SBIvvki3mnR28Mf0Pkin9w9mWNam5AA0SpIGj1ZN2c4 import scala.util.parsing.combinator._ @@ -27,25 +28,28 @@ class ConjunctiveQueryParser extends JavaTokenParsers { // def stringliteral1: Parser[String] = ("'"+"""([^'\p{Cntrl}\\]|\\[\\"'bfnrt]|\\u[a-fA-F0-9]{4})*"""+"'").r ^^ {case (x) => x} // def stringliteral2: Parser[String] = """[a-zA-Z_0-9\./]*""".r ^^ {case (x) => x} // def stringliteral: Parser[String] = (stringliteral1 | stringliteral2) ^^ {case (x) => x} - def stringliteral: Parser[String] = """[a-zA-Z0-9_\[\]]+""".r + def name: Parser[String] = """[a-zA-Z0-9_\[\]]+""".r def path: Parser[String] = 
"""[a-zA-Z0-9\./_]+""".r // relation names and columns are just strings. - def relation_name: Parser[String] = stringliteral - def col : Parser[String] = stringliteral - def attr : Parser[Column] = - stringliteral ~ stringliteral ^^ { - case(x ~ y) => Column(x, y) + def relationName = name + def columnName = name + def columnType = name + def variableName = name + def functionName = name + def columnDeclaration : Parser[Column] = + columnName ~ columnType ^^ { + case(name ~ ty) => Column(name, ty) } def atom: Parser[Atom] = - relation_name ~ "(" ~ rep1sep(col, ",") ~ ")" ^^ { + relationName ~ "(" ~ rep1sep(variableName, ",") ~ ")" ^^ { case (r ~ "(" ~ cols ~ ")") => Atom(r, cols.zipWithIndex map { case(name,i) => Variable(name, r, i) }) } - def attribute: Parser[Attribute] = - relation_name ~ "(" ~ rep1sep(attr, ",") ~ ")" ^^ { + def relationDeclaration: Parser[Attribute] = + relationName ~ "(" ~ rep1sep(columnDeclaration, ",") ~ ")" ^^ { case (r ~ "(" ~ attrs ~ ")") => { val vars = attrs.zipWithIndex map { case(x, i) => Variable(x.name, r, i) } var types = attrs map { case(x) => x.t } @@ -59,18 +63,18 @@ class ConjunctiveQueryParser extends JavaTokenParsers { ConjunctiveQuery(headatom, bodyatoms.toList) } - def schemaElement : Parser[SchemaElement] = - attribute ~ opt("?") ^^ { + def schemaDeclaration : Parser[SchemaElement] = + relationDeclaration ~ opt("?") ^^ { case (a ~ None ) => SchemaElement(a,true) case (a ~ Some(_)) => SchemaElement(a,false) } - def functionElement : Parser[FunctionElement] = - ( "function" ~ stringliteral - ~ "over" ~ "like" ~ stringliteral - ~ "returns" ~ "like" ~ stringliteral - ~ "implementation" ~ "\"" ~ path ~ "\"" ~ "handles" ~ stringliteral ~ "lines" + def functionDeclaration : Parser[FunctionElement] = + ( "function" ~ functionName + ~ "over" ~ "like" ~ relationName + ~ "returns" ~ "like" ~ relationName + ~ "implementation" ~ "\"" ~ path ~ "\"" ~ "handles" ~ ("tsv" | "json") ~ "lines" ) ^^ { case ("function" ~ a ~ "over" ~ "like" ~ b @@ -84,9 +88,9 @@ class ConjunctiveQueryParser extends JavaTokenParsers { ExtractionRule(_) } - def functionRule : Parser[FunctionRule] = - ( stringliteral ~ ":-" ~ "!" - ~ stringliteral ~ "(" ~ stringliteral ~ ")" + def functionCallRule : Parser[FunctionRule] = + ( relationName ~ ":-" ~ "!" + ~ functionName ~ "(" ~ relationName ~ ")" ) ^^ { case (out ~ ":-" ~ "!" 
~ fn ~ "(" ~ in ~ ")") => FunctionRule(in, out, fn) @@ -97,13 +101,13 @@ class ConjunctiveQueryParser extends JavaTokenParsers { x => KnownFactorWeight(x.toDouble) } def unknwonWeight = - "weight" ~> "=" ~> opt(rep1sep(col, ",")) ^^ { + "weight" ~> "=" ~> opt(rep1sep(variableName, ",")) ^^ { case Some(varList) => UnknownFactorWeight(varList.toList) case _ => UnknownFactorWeight(List()) } def factorWeight = constantWeight | unknwonWeight - def supervision = "label" ~> "=" ~> col + def supervision = "label" ~> "=" ~> variableName def inferenceRule : Parser[InferenceRule] = ( query ~ factorWeight ~ supervision @@ -112,11 +116,11 @@ class ConjunctiveQueryParser extends JavaTokenParsers { } // rules or schema elements in aribitrary order - def statement : Parser[Statement] = ( functionElement + def statement : Parser[Statement] = ( functionDeclaration | inferenceRule | extractionRule - | functionRule - | schemaElement + | functionCallRule + | schemaDeclaration ) def program : Parser[List[Statement]] = rep1sep(statement, ".") From 2fa5613bdefe6d55725adbeb180244cabb1be0c8 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 13:06:54 -0700 Subject: [PATCH 042/347] Replaces our clunky parsers for names, numbers, strings with JavaTokenParsers' --- DeepDiveLogCompiler.scala | 3 ++- DeepDiveLogParser.scala | 57 +++++++++++++++++++-------------------- build.sbt | 1 + project/assembly.sbt | 1 + 4 files changed, 32 insertions(+), 30 deletions(-) create mode 100644 build.sbt create mode 100644 project/assembly.sbt diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index a8e0a8639..6a676d0f2 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -57,6 +57,7 @@ Consider */ import scala.collection.immutable.HashMap +import org.apache.commons.lang3.StringEscapeUtils // This handles the schema statements. // It can tell you if a predicate is a "query" predicate or a "ground prediate" @@ -285,7 +286,7 @@ case class FunctionRule(input : String, output : String, function : String) exte input: \"\"\" SELECT * FROM ${input} \"\"\" output_relation: \"${output}\" - udf: \"${function.implementation}\" + udf: \"${StringEscapeUtils.escapeJava(function.implementation)}\" style: \"${function.mode}_extractor\" ${ss.generateDependenciesOfCompiledBlockFor(this)} } diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala index 3ba2300a4..c507f8f3e 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -2,6 +2,7 @@ // See: https://docs.google.com/document/d/1SBIvvki3mnR28Mf0Pkin9w9mWNam5AA0SpIGj1ZN2c4 import scala.util.parsing.combinator._ +import org.apache.commons.lang3.StringEscapeUtils // *************************************** // * The union types for for the parser. * @@ -24,19 +25,23 @@ case class UnknownFactorWeight(variables: List[String]) extends FactorWeight // Parser class ConjunctiveQueryParser extends JavaTokenParsers { - // Odd definitions, but we'll keep them. - // def stringliteral1: Parser[String] = ("'"+"""([^'\p{Cntrl}\\]|\\[\\"'bfnrt]|\\u[a-fA-F0-9]{4})*"""+"'").r ^^ {case (x) => x} - // def stringliteral2: Parser[String] = """[a-zA-Z_0-9\./]*""".r ^^ {case (x) => x} - // def stringliteral: Parser[String] = (stringliteral1 | stringliteral2) ^^ {case (x) => x} - def name: Parser[String] = """[a-zA-Z0-9_\[\]]+""".r - def path: Parser[String] = """[a-zA-Z0-9\./_]+""".r - - // relation names and columns are just strings. 
- def relationName = name - def columnName = name - def columnType = name - def variableName = name - def functionName = name + + // JavaTokenParsers provides several useful number parsers: + // wholeNumber, decimalNumber, floatingPointNumber + def floatingPointNumberAsDouble = floatingPointNumber ^^ { _.toDouble } + def stringLiteralAsString = stringLiteral ^^ { + s => StringEscapeUtils.unescapeJava( + s.stripPrefix("\"").stripSuffix("\"")) + } + + // We just use Java identifiers to parse various names + def relationName = ident + def columnName = ident + def columnType = ident ~ ("[]"?) ^^ { + case ty ~ isArrayType => ty + isArrayType.getOrElse("") + } + def variableName = ident + def functionName = ident def columnDeclaration : Parser[Column] = columnName ~ columnType ^^ { case(name ~ ty) => Column(name, ty) @@ -74,12 +79,12 @@ class ConjunctiveQueryParser extends JavaTokenParsers { ( "function" ~ functionName ~ "over" ~ "like" ~ relationName ~ "returns" ~ "like" ~ relationName - ~ "implementation" ~ "\"" ~ path ~ "\"" ~ "handles" ~ ("tsv" | "json") ~ "lines" + ~ "implementation" ~ stringLiteralAsString ~ "handles" ~ ("tsv" | "json") ~ "lines" ) ^^ { case ("function" ~ a ~ "over" ~ "like" ~ b ~ "returns" ~ "like" ~ c - ~ "implementation" ~ "\"" ~ d ~ "\"" ~ "handles" ~ e ~ "lines") => + ~ "implementation" ~ d ~ "handles" ~ e ~ "lines") => FunctionElement(a, b, c, d, e) } @@ -96,33 +101,27 @@ class ConjunctiveQueryParser extends JavaTokenParsers { FunctionRule(in, out, fn) } - def constantWeight = - "weight" ~> "=" ~> """-?[\d\.]+""".r ^^ { - x => KnownFactorWeight(x.toDouble) - } - def unknwonWeight = - "weight" ~> "=" ~> opt(rep1sep(variableName, ",")) ^^ { - case Some(varList) => UnknownFactorWeight(varList.toList) - case _ => UnknownFactorWeight(List()) - } - def factorWeight = constantWeight | unknwonWeight + def constantWeight = floatingPointNumberAsDouble ^^ { KnownFactorWeight(_) } + def unknownWeight = repsep(variableName, ",") ^^ { UnknownFactorWeight(_) } + def factorWeight = "weight" ~> "=" ~> (constantWeight | unknownWeight) def supervision = "label" ~> "=" ~> variableName def inferenceRule : Parser[InferenceRule] = ( query ~ factorWeight ~ supervision ) ^^ { - case (q ~ weight ~ supervision) => InferenceRule(q, weight, supervision) + case (q ~ weight ~ supervision) => + InferenceRule(q, weight, supervision) } // rules or schema elements in aribitrary order - def statement : Parser[Statement] = ( functionDeclaration + def statement : Parser[Statement] = ( schemaDeclaration | inferenceRule | extractionRule + | functionDeclaration | functionCallRule - | schemaDeclaration ) - def program : Parser[List[Statement]] = rep1sep(statement, ".") + def program : Parser[List[Statement]] = phrase(rep1(statement <~ ".")) def parseProgram(inputProgram: CharSequence, fileName: Option[String] = None): List[Statement] = { parse(program, inputProgram) match { diff --git a/build.sbt b/build.sbt new file mode 100644 index 000000000..1d2b3a28a --- /dev/null +++ b/build.sbt @@ -0,0 +1 @@ +libraryDependencies += "org.apache.commons" % "commons-lang3" % "3.4" diff --git a/project/assembly.sbt b/project/assembly.sbt new file mode 100644 index 000000000..74adde3fc --- /dev/null +++ b/project/assembly.sbt @@ -0,0 +1 @@ +addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.13.0") From c156884994edc7cec2f75ebb39b99be6161ca491 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 14:05:58 -0700 Subject: [PATCH 043/347] Cleans up conjunctiveQuery grammar making it more extensible in the future. 
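
The "?" marking a query (variable) relation now attaches to the relation name
instead of trailing the column list, so examples/test6.ddl declares
has_spouse?(relation_id text). rather than has_spouse(relation_id text)?.
A rule body may now also be a ";"-separated disjunction of conjunctive bodies,
e.g. q(x) :- a(x), b(x); c(x). though only the first disjunct is compiled for
the time being (see the XXX note in conjunctiveQuery).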
--- DeepDiveLogCompiler.scala | 2 +- DeepDiveLogParser.scala | 57 ++++++++++++++++++++++++--------------- examples/test6.ddl | 2 +- 3 files changed, 37 insertions(+), 24 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 6a676d0f2..dfc90fada 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -83,7 +83,7 @@ class CompilationState( statements : DeepDiveLogCompiler.Program ) { terms.foreach { case Variable(n,r,i) => schema += { (r,i) -> n } - ground_relations += { r -> query } // record whether a query or a ground term. + ground_relations += { r -> !query } // record whether a query or a ground term. } case ExtractionRule(_) => () case InferenceRule(_,_,_) => () diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala index c507f8f3e..b3181a024 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -8,6 +8,7 @@ import org.apache.commons.lang3.StringEscapeUtils // * The union types for for the parser. * // *************************************** case class Variable(varName : String, relName : String, index : Int ) +// TODO make Atom a trait, and have multiple case classes, e.g., RelationAtom and CondExprAtom case class Atom(name : String, terms : List[Variable]) case class Attribute(name : String, terms : List[Variable], types : List[String]) case class ConjunctiveQuery(head: Atom, body: List[Atom]) @@ -42,36 +43,48 @@ class ConjunctiveQueryParser extends JavaTokenParsers { } def variableName = ident def functionName = ident - def columnDeclaration : Parser[Column] = + + def columnDeclaration: Parser[Column] = columnName ~ columnType ^^ { case(name ~ ty) => Column(name, ty) } - - def atom: Parser[Atom] = - relationName ~ "(" ~ rep1sep(variableName, ",") ~ ")" ^^ { - case (r ~ "(" ~ cols ~ ")") => - Atom(r, cols.zipWithIndex map { case(name,i) => Variable(name, r, i) }) - } - - def relationDeclaration: Parser[Attribute] = - relationName ~ "(" ~ rep1sep(columnDeclaration, ",") ~ ")" ^^ { - case (r ~ "(" ~ attrs ~ ")") => { + def schemaDeclaration: Parser[SchemaElement] = + relationName ~ opt("?") ~ "(" ~ rep1sep(columnDeclaration, ",") ~ ")" ^^ { + case (r ~ isQuery ~ "(" ~ attrs ~ ")") => { val vars = attrs.zipWithIndex map { case(x, i) => Variable(x.name, r, i) } var types = attrs map { case(x) => x.t } - Attribute(r, vars, types) + SchemaElement(Attribute(r, vars, types), (isQuery != None)) } } - def query : Parser[ConjunctiveQuery] = - atom ~ ":-" ~ rep1sep(atom, ",") ^^ { - case (headatom ~ ":-" ~ bodyatoms) => - ConjunctiveQuery(headatom, bodyatoms.toList) + + // TODO support aggregate function syntax somehow + def cqHead = relationName ~ "(" ~ repsep(variableName, ",") ~ ")" ^^ { + case (r ~ "(" ~ variableUses ~ ")") => + Atom(r, variableUses.zipWithIndex map { + case(name,i) => Variable(name, r, i) + }) } - def schemaDeclaration : Parser[SchemaElement] = - relationDeclaration ~ opt("?") ^^ { - case (a ~ None ) => SchemaElement(a,true) - case (a ~ Some(_)) => SchemaElement(a,false) + // TODO add conditional expressions for where clause + def cqConditionalExpr = failure("No conditional expression supported yet") + def cqBodyAtom: Parser[Atom] = + ( relationName ~ "(" ~ repsep(variableName, ",") ~ ")" ^^ { + case (r ~ "(" ~ variableBindings ~ ")") => + Atom(r, variableBindings.zipWithIndex map { + case(name,i) => Variable(name, r, i) + }) + } + | cqConditionalExpr + ) + def cqBody: Parser[List[Atom]] = rep1sep(cqBodyAtom, ",") + def conjunctiveQuery : Parser[ConjunctiveQuery] = + cqHead ~ ":-" ~ rep1sep(cqBody, 
";") ^^ { + case (headatom ~ ":-" ~ disjunctiveBodies) => + // TODO handle all disjunctiveBodies + // XXX only compiling the first body + val bodyatoms = disjunctiveBodies(0) + ConjunctiveQuery(headatom, bodyatoms.toList) } @@ -89,7 +102,7 @@ class ConjunctiveQueryParser extends JavaTokenParsers { } def extractionRule : Parser[ExtractionRule] = - query ^^ { + conjunctiveQuery ^^ { ExtractionRule(_) } @@ -108,7 +121,7 @@ class ConjunctiveQueryParser extends JavaTokenParsers { def supervision = "label" ~> "=" ~> variableName def inferenceRule : Parser[InferenceRule] = - ( query ~ factorWeight ~ supervision + ( conjunctiveQuery ~ factorWeight ~ supervision ) ^^ { case (q ~ weight ~ supervision) => InferenceRule(q, weight, supervision) diff --git a/examples/test6.ddl b/examples/test6.ddl index 753bea2e5..f7b87deed 100644 --- a/examples/test6.ddl +++ b/examples/test6.ddl @@ -28,7 +28,7 @@ has_spouse_features( relation_id text, feature text). -has_spouse(relation_id text)?. +has_spouse?(relation_id text). people_mentions :- !ext_people(ext_people_input). From 2d8fa1093ee0f71fb8e156bf2344b45ebd5f19c6 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 23 Apr 2015 14:16:06 -0700 Subject: [PATCH 044/347] Renames Statement classes --- DeepDiveLogCompiler.scala | 39 +++++++++++++++++---------------------- DeepDiveLogParser.scala | 12 ++++++------ 2 files changed, 23 insertions(+), 28 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index dfc90fada..33d0a38d0 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -71,7 +71,7 @@ class CompilationState( statements : DeepDiveLogCompiler.Program ) { var ground_relations : Map[ String, Boolean ] = new HashMap[ String, Boolean ]() - var function_schema : Map[String, FunctionElement] = new HashMap[ String, FunctionElement]() + var function_schema : Map[String, FunctionDeclaration] = new HashMap[ String, FunctionDeclaration]() // The dependency graph between statements. var dependencies : Map[Statement, Set[Statement]] = new HashMap() @@ -79,16 +79,16 @@ class CompilationState( statements : DeepDiveLogCompiler.Program ) { def init() = { // generate the statements. statements.foreach { - case SchemaElement(Attribute(r, terms, types),query) => + case SchemaDeclaration(Attribute(r, terms, types), isQuery) => terms.foreach { case Variable(n,r,i) => schema += { (r,i) -> n } - ground_relations += { r -> !query } // record whether a query or a ground term. + ground_relations += { r -> !isQuery } // record whether a query or a ground term. } case ExtractionRule(_) => () case InferenceRule(_,_,_) => () - case FunctionElement(a, b, c, d, e) => function_schema += {a -> FunctionElement(a, b, c, d, e)} - case FunctionRule(_,_,_) => () + case FunctionDeclaration(a, b, c, d, e) => function_schema += {a -> FunctionDeclaration(a, b, c, d, e)} + case FunctionCallRule(_,_,_) => () } analyzeDependency(statements) @@ -98,9 +98,9 @@ class CompilationState( statements : DeepDiveLogCompiler.Program ) { // Given a statement, resolve its name for the compiled extractor block. 
def resolveExtractorBlockName(s: Statement): String = s match { - case s: ExtractionRule => s"extraction_rule_${statements indexOf s}" - case s: FunctionRule => s"extraction_rule_${statements indexOf s}" - case s: InferenceRule => s"extraction_rule_${s.q.head.name}" + case s: FunctionCallRule => s"extraction_rule_${statements indexOf s}" + case s: ExtractionRule => s"extraction_rule_${statements indexOf s}" + case s: InferenceRule => s"extraction_rule_${s.q.head.name}" } // Given a variable, resolve it. TODO: This should give a warning, @@ -116,13 +116,8 @@ class CompilationState( statements : DeepDiveLogCompiler.Program ) { } } - def resolveFunctionName( v : String ) : FunctionElement = { - if (function_schema contains v) { - function_schema(v) - } else { - return FunctionElement("0","0","0","0","0") - } - + def resolveFunctionName( v : String ) : FunctionDeclaration = { + function_schema(v) } // The default is query term. @@ -178,15 +173,15 @@ class CompilationState( statements : DeepDiveLogCompiler.Program ) { var stmtByHeadName = new HashMap[String, Statement]() statements foreach { case e : ExtractionRule => stmtByHeadName += { e.q.head.name -> e } - case f : FunctionRule => stmtByHeadName += { f.output -> f } + case f : FunctionCallRule => stmtByHeadName += { f.output -> f } case w : InferenceRule => stmtByHeadName += { w.q.head.name -> w } case _ => } // then, look at the body of each statement to construct a dependency graph statements foreach { - case f : FunctionRule => dependencies += { f -> ( Some(f.input) flatMap (stmtByHeadName get _)).toSet } - case e : ExtractionRule => dependencies += { e -> (e.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet } - case w : InferenceRule => dependencies += { w -> (w.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet } + case f : FunctionCallRule => dependencies += { f -> ( Some(f.input) flatMap (stmtByHeadName get _)).toSet } + case e : ExtractionRule => dependencies += { e -> (e.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet } + case w : InferenceRule => dependencies += { w -> (w.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet } case _ => } } @@ -237,8 +232,8 @@ trait Statement { type CompiledBlocks = DeepDiveLogCompiler.CompiledBlocks def compile(state: CompilationState): CompiledBlocks = List() } -case class SchemaElement( a : Attribute , query : Boolean ) extends Statement // atom and whether this is a query relation. -case class FunctionElement( functionName: String, input: String, output: String, implementation: String, mode: String) extends Statement +case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statement // atom and whether this is a query relation. 
+case class FunctionDeclaration( functionName: String, input: String, output: String, implementation: String, mode: String) extends Statement case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule { // Generate extraction rule part for deepdive @@ -271,7 +266,7 @@ case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction List(extractor) } } -case class FunctionRule(input : String, output : String, function : String) extends Statement // Extraction rule +case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule { override def compile(ss: CompilationState): CompiledBlocks = { val inputQuery = s""" diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala index b3181a024..dc07e2d4a 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -48,12 +48,12 @@ class ConjunctiveQueryParser extends JavaTokenParsers { columnName ~ columnType ^^ { case(name ~ ty) => Column(name, ty) } - def schemaDeclaration: Parser[SchemaElement] = + def schemaDeclaration: Parser[SchemaDeclaration] = relationName ~ opt("?") ~ "(" ~ rep1sep(columnDeclaration, ",") ~ ")" ^^ { case (r ~ isQuery ~ "(" ~ attrs ~ ")") => { val vars = attrs.zipWithIndex map { case(x, i) => Variable(x.name, r, i) } var types = attrs map { case(x) => x.t } - SchemaElement(Attribute(r, vars, types), (isQuery != None)) + SchemaDeclaration(Attribute(r, vars, types), (isQuery != None)) } } @@ -88,7 +88,7 @@ class ConjunctiveQueryParser extends JavaTokenParsers { } - def functionDeclaration : Parser[FunctionElement] = + def functionDeclaration : Parser[FunctionDeclaration] = ( "function" ~ functionName ~ "over" ~ "like" ~ relationName ~ "returns" ~ "like" ~ relationName @@ -98,7 +98,7 @@ class ConjunctiveQueryParser extends JavaTokenParsers { ~ "over" ~ "like" ~ b ~ "returns" ~ "like" ~ c ~ "implementation" ~ d ~ "handles" ~ e ~ "lines") => - FunctionElement(a, b, c, d, e) + FunctionDeclaration(a, b, c, d, e) } def extractionRule : Parser[ExtractionRule] = @@ -106,12 +106,12 @@ class ConjunctiveQueryParser extends JavaTokenParsers { ExtractionRule(_) } - def functionCallRule : Parser[FunctionRule] = + def functionCallRule : Parser[FunctionCallRule] = ( relationName ~ ":-" ~ "!" ~ functionName ~ "(" ~ relationName ~ ")" ) ^^ { case (out ~ ":-" ~ "!" 
~ fn ~ "(" ~ in ~ ")") => - FunctionRule(in, out, fn) + FunctionCallRule(in, out, fn) } def constantWeight = floatingPointNumberAsDouble ^^ { KnownFactorWeight(_) } From 381d08419ffd2551ed3e8784be4769b80d8d9864 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Fri, 24 Apr 2015 02:06:54 -0700 Subject: [PATCH 045/347] Separates RelationType parsing --- DeepDiveLogCompiler.scala | 4 ++-- DeepDiveLogParser.scala | 21 +++++++++++++++------ 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 33d0a38d0..ed286e791 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -87,7 +87,7 @@ class CompilationState( statements : DeepDiveLogCompiler.Program ) { } case ExtractionRule(_) => () case InferenceRule(_,_,_) => () - case FunctionDeclaration(a, b, c, d, e) => function_schema += {a -> FunctionDeclaration(a, b, c, d, e)} + case f @ FunctionDeclaration(a, b, c, d, e) => function_schema += {a -> f} case FunctionCallRule(_,_,_) => () } @@ -233,7 +233,7 @@ trait Statement { def compile(state: CompilationState): CompiledBlocks = List() } case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statement // atom and whether this is a query relation. -case class FunctionDeclaration( functionName: String, input: String, output: String, implementation: String, mode: String) extends Statement +case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementation: String, mode: String) extends Statement case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule { // Generate extraction rule part for deepdive diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala index dc07e2d4a..84028f532 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -21,9 +21,12 @@ sealed trait FactorWeight { case class KnownFactorWeight(value: Double) extends FactorWeight { def variables = Nil } - case class UnknownFactorWeight(variables: List[String]) extends FactorWeight +trait RelationType +case class RelationTypeDeclaration(names: List[String], types: List[String]) extends RelationType +case class RelationTypeAlias(likeRelationName: String) extends RelationType + // Parser class ConjunctiveQueryParser extends JavaTokenParsers { @@ -87,18 +90,24 @@ class ConjunctiveQueryParser extends JavaTokenParsers { ConjunctiveQuery(headatom, bodyatoms.toList) } + def relationType: Parser[RelationType] = + ( rep1sep(columnDeclaration, ",") ^^ { + attrs => RelationTypeDeclaration(attrs map { _.name }, attrs map { _.t }) + } + | "like" ~> relationName ^^ { RelationTypeAlias(_) } + ) def functionDeclaration : Parser[FunctionDeclaration] = ( "function" ~ functionName - ~ "over" ~ "like" ~ relationName - ~ "returns" ~ "like" ~ relationName + ~ "over" ~ relationType + ~ "returns" ~ relationType ~ "implementation" ~ stringLiteralAsString ~ "handles" ~ ("tsv" | "json") ~ "lines" ) ^^ { case ("function" ~ a - ~ "over" ~ "like" ~ b - ~ "returns" ~ "like" ~ c + ~ "over" ~ inTy + ~ "returns" ~ outTy ~ "implementation" ~ d ~ "handles" ~ e ~ "lines") => - FunctionDeclaration(a, b, c, d, e) + FunctionDeclaration(a, inTy, outTy, d, e) } def extractionRule : Parser[ExtractionRule] = From 61dda30aa90620c7e7f4d5afcc869b1031b44069 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Fri, 24 Apr 2015 02:42:47 -0700 Subject: [PATCH 046/347] Refactors grammar to keep implementation declarations separate --- DeepDiveLogCompiler.scala | 39 
++++++++++++++++++++++++--------------- DeepDiveLogParser.scala | 25 ++++++++++++++++--------- 2 files changed, 40 insertions(+), 24 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index ed286e791..fe05f1fa4 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -87,7 +87,7 @@ class CompilationState( statements : DeepDiveLogCompiler.Program ) { } case ExtractionRule(_) => () case InferenceRule(_,_,_) => () - case f @ FunctionDeclaration(a, b, c, d, e) => function_schema += {a -> f} + case fdecl : FunctionDeclaration => function_schema += {fdecl.functionName -> fdecl} case FunctionCallRule(_,_,_) => () } @@ -96,6 +96,10 @@ class CompilationState( statements : DeepDiveLogCompiler.Program ) { init() + def error(message: String) { + throw new RuntimeException(message) + } + // Given a statement, resolve its name for the compiled extractor block. def resolveExtractorBlockName(s: Statement): String = s match { case s: FunctionCallRule => s"extraction_rule_${statements indexOf s}" @@ -233,7 +237,7 @@ trait Statement { def compile(state: CompilationState): CompiledBlocks = List() } case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statement // atom and whether this is a query relation. -case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementation: String, mode: String) extends Statement +case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration]) extends Statement case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule { // Generate extraction rule part for deepdive @@ -274,6 +278,15 @@ case class FunctionCallRule(input : String, output : String, function : String) """ val function = ss.resolveFunctionName(this.function) + val udfDetails = (function.implementations collectFirst { + case impl: RowWiseLineHandler => + s"""udf: \"${StringEscapeUtils.escapeJava(impl.command)}\" + style: \"${impl.format}_extractor\"""" + }) + + if (udfDetails.isEmpty) + ss.error(s"Cannot find compilable implementation for function ${this.function} among:\n " + + (function.implementations mkString "\n ")) val blockName = ss.resolveExtractorBlockName(this) val extractor = s""" @@ -281,8 +294,7 @@ case class FunctionCallRule(input : String, output : String, function : String) input: \"\"\" SELECT * FROM ${input} \"\"\" output_relation: \"${output}\" - udf: \"${StringEscapeUtils.escapeJava(function.implementation)}\" - style: \"${function.mode}_extractor\" + ${udfDetails.get} ${ss.generateDependenciesOfCompiledBlockFor(this)} } """ @@ -378,15 +390,8 @@ object DeepDiveLogCompiler { type CompiledBlocks = List[CompiledBlock] val parser = new ConjunctiveQueryParser - def parseFilesOrExit(fileNames: Array[String]): Program = { - try { - fileNames.toList flatMap { parser.parseProgramFile(_) } - } catch { - case e: RuntimeException => - System.err.println("[error] " + e.getMessage) - System.exit(1) - null - } + def parseFiles(fileNames: Array[String]): Program = { + fileNames.toList flatMap { parser.parseProgramFile(_) } } def compileUserSettings(): CompiledBlocks = { @@ -423,9 +428,9 @@ object DeepDiveLogCompiler { } // entry point for command-line interface - def main(args: Array[String]) { + def main(args: Array[String]) = try { // parse each file into a single program - val parsedProgram = parseFilesOrExit(args) + val parsedProgram = parseFiles(args) // take an 
initial pass to analyze the parsed program val state = new CompilationState( parsedProgram ) @@ -441,5 +446,9 @@ object DeepDiveLogCompiler { // emit the generated code blocks foreach println + } catch { + case e: RuntimeException => + System.err.println("[error] " + e.getMessage) + System.exit(1) } } diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala index 84028f532..46c518bb6 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -27,6 +27,10 @@ trait RelationType case class RelationTypeDeclaration(names: List[String], types: List[String]) extends RelationType case class RelationTypeAlias(likeRelationName: String) extends RelationType +trait FunctionImplementationDeclaration +case class RowWiseLineHandler(format: String, command: String) extends FunctionImplementationDeclaration + + // Parser class ConjunctiveQueryParser extends JavaTokenParsers { @@ -97,17 +101,20 @@ class ConjunctiveQueryParser extends JavaTokenParsers { | "like" ~> relationName ^^ { RelationTypeAlias(_) } ) + def functionImplementation : Parser[FunctionImplementationDeclaration] = + "implementation" ~ stringLiteralAsString ~ "handles" ~ ("tsv" | "json") ~ "lines" ^^ { + case (_ ~ command ~ _ ~ format ~ _) => RowWiseLineHandler(command=command, format=format) + } + def functionDeclaration : Parser[FunctionDeclaration] = - ( "function" ~ functionName - ~ "over" ~ relationType - ~ "returns" ~ relationType - ~ "implementation" ~ stringLiteralAsString ~ "handles" ~ ("tsv" | "json") ~ "lines" + ( "function" ~ functionName ~ "over" ~ relationType + ~ "returns" ~ relationType + ~ (functionImplementation+) ) ^^ { - case ("function" ~ a - ~ "over" ~ inTy - ~ "returns" ~ outTy - ~ "implementation" ~ d ~ "handles" ~ e ~ "lines") => - FunctionDeclaration(a, inTy, outTy, d, e) + case ("function" ~ a ~ "over" ~ inTy + ~ "returns" ~ outTy + ~ implementationDecls) => + FunctionDeclaration(a, inTy, outTy, implementationDecls) } def extractionRule : Parser[ExtractionRule] = From 8e2c5657086e1662e3495b7688cc8d24971b456a Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Fri, 24 Apr 2015 03:05:40 -0700 Subject: [PATCH 047/347] Updates README, renames spouse_example --- Makefile | 2 +- README.md | 13 +++++++++++-- examples/{test6.ddl => spouse_example.ddl} | 0 .../{test6.expected => spouse_example.expected} | 0 4 files changed, 12 insertions(+), 3 deletions(-) rename examples/{test6.ddl => spouse_example.ddl} (100%) rename examples/{test6.expected => spouse_example.expected} (100%) diff --git a/Makefile b/Makefile index 5a36a213a..02dc2d557 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # Makefile for DeepDiveLogCompiler TESTJAR = ddlc-test.jar -TEST = examples/test6.ddl +TEST = examples/spouse_example.ddl test: $(TESTJAR) CLASSPATH=$(shell sbt "export compile:dependency-classpath" | tail -1) \ diff --git a/README.md b/README.md index 87f79a46d..5f4807a65 100644 --- a/README.md +++ b/README.md @@ -3,14 +3,23 @@ DeepDiveLogCompiler A compiler that enables writing DeepDive apps in a Datalog-like syntax. -## Building +## Testing ```bash make ``` +## Building +The following command produces a standalone jar. + +```bash +make ddlc.jar +``` + ## Running The following will generate an application.conf for the [spouse example in DeepDive's tutorial](http://deepdive.stanford.edu/doc/basics/walkthrough/walkthrough.html). 
```bash -scala ddlc.jar examples/spouse_example.ddl >application.conf +mkdir -p examples/spouse_example +java -jar ddlc.jar examples/spouse_example.ddl >examples/spouse_example/application.conf ``` + diff --git a/examples/test6.ddl b/examples/spouse_example.ddl similarity index 100% rename from examples/test6.ddl rename to examples/spouse_example.ddl diff --git a/examples/test6.expected b/examples/spouse_example.expected similarity index 100% rename from examples/test6.expected rename to examples/spouse_example.expected From 2bd3219f200c4d51afe2d2844f3b9bba51791c4d Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Tue, 28 Apr 2015 20:27:09 -0700 Subject: [PATCH 048/347] Adds scopt to support more than one mode Namely, compiling and pretty printing for now. --- DeepDiveLogCompiler.scala | 87 ++++++++++++++++++++++++++++++--------- Makefile | 4 +- build.sbt | 4 ++ 3 files changed, 74 insertions(+), 21 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index fe05f1fa4..04cff9d17 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -62,7 +62,7 @@ import org.apache.commons.lang3.StringEscapeUtils // This handles the schema statements. // It can tell you if a predicate is a "query" predicate or a "ground prediate" // and it resolves Variables their correct and true name in the schema, i.e. R(x,y) then x could be Attribute1 declared. -class CompilationState( statements : DeepDiveLogCompiler.Program ) { +class CompilationState( statements : DeepDiveLog.Program ) { // TODO: refactor the schema into a class that constructs and // manages these maps. Also it should have appropriate // abstractions and error handling for missing values. @@ -382,18 +382,37 @@ case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervisi } } +// An abstraction of DeepDiveLog handlers +trait DeepDiveLogHandler { + def run(program: DeepDiveLog.Program, config: DeepDiveLog.Config): Unit -// Compiler object that wires up everything together -object DeepDiveLogCompiler { - type Program = List[Statement] - type CompiledBlock = String - type CompiledBlocks = List[CompiledBlock] + def run(config: DeepDiveLog.Config): Unit = try { + // parse each file into a single program + val parsedProgram = parseFiles(config.inputFiles) + + // run handler with the parsed program + run(parsedProgram, config) + } catch { + case e: RuntimeException => die(e.getMessage) + } - val parser = new ConjunctiveQueryParser - def parseFiles(fileNames: Array[String]): Program = { - fileNames.toList flatMap { parser.parseProgramFile(_) } + def parseFiles(fileNames: List[String]): DeepDiveLog.Program = { + val ddlParser = new ConjunctiveQueryParser + fileNames flatMap { ddlParser.parseProgramFile(_) } } + def die(message: String = null) = { + if (message != null) + System.err.println("[error] " + message) + System.exit(1) + } +} + +// Compiler that wires up everything together +object DeepDiveLogCompiler extends DeepDiveLogHandler { + type CompiledBlock = String + type CompiledBlocks = List[CompiledBlock] + def compileUserSettings(): CompiledBlocks = { // TODO read user's proto-application.conf and augment it List(""" @@ -410,7 +429,7 @@ object DeepDiveLogCompiler { } // generate variable schema statements - def compileVariableSchema(statements: Program, ss: CompilationState): CompiledBlocks = { + def compileVariableSchema(statements: DeepDiveLog.Program, ss: CompilationState): CompiledBlocks = { var schema = Set[String]() // generate the statements. 
statements.foreach { @@ -427,11 +446,8 @@ object DeepDiveLogCompiler { List(ddSchema) } - // entry point for command-line interface - def main(args: Array[String]) = try { - // parse each file into a single program - val parsedProgram = parseFiles(args) - + // entry point for compilation + override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { // take an initial pass to analyze the parsed program val state = new CompilationState( parsedProgram ) @@ -446,9 +462,42 @@ object DeepDiveLogCompiler { // emit the generated code blocks foreach println - } catch { - case e: RuntimeException => - System.err.println("[error] " + e.getMessage) - System.exit(1) + } +} + +// Pretty printer that simply prints the parsed input +object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { + override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { + // TODO pretty print in original syntax + println(parsedProgram) + } +} + +// A command-line interface +object DeepDiveLog { + type Program = List[Statement] + + case class Config + ( handler: DeepDiveLogHandler = null + , inputFiles: List[String] = List() + ) + val parser = new scopt.OptionParser[Config]("ddlogc") { + head("ddlogc", "0.0.1") + cmd("compile") required() action { (_, c) => c.copy(handler = DeepDiveLogCompiler) } + cmd("print") required() action { (_, c) => c.copy(handler = DeepDiveLogPrettyPrinter) } + arg[String]("FILE...") unbounded() required() action { (f, c) => c.copy(inputFiles = c.inputFiles ++ List(f)) } text("Input DDLog programs files") + checkConfig { c => + if (c.handler == null) failure("No command specified") + else success + } + } + + def main(args: Array[String]) = { + parser.parse(args, Config()) match { + case Some(config) => + config.handler.run(config) + case None => + System.err.println("[error] ") + } } } diff --git a/Makefile b/Makefile index 02dc2d557..5d046e928 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ TEST = examples/spouse_example.ddl test: $(TESTJAR) CLASSPATH=$(shell sbt "export compile:dependency-classpath" | tail -1) \ - scala $< $(TEST) | diff -u $(TEST:.ddl=.expected) - + scala $< compile $(TEST) | diff -u $(TEST:.ddl=.expected) - $(TESTJAR): $(wildcard *.scala) sbt package ln -sfn $(shell ls -t target/scala-*/*_*.jar | head -1) $@ @@ -14,7 +14,7 @@ $(TESTJAR): $(wildcard *.scala) # standalone jar JAR = ddlc.jar test-package: $(JAR) - scala $< $(TEST) | diff -u $(TEST:.ddl=.expected) - + scala $< compile $(TEST) | diff -u $(TEST:.ddl=.expected) - $(JAR): $(wildcard *.scala) sbt assembly ln -sfn $(shell ls -t target/scala-*/*-assembly-*.jar | head -1) $@ diff --git a/build.sbt b/build.sbt index 1d2b3a28a..198955f7d 100644 --- a/build.sbt +++ b/build.sbt @@ -1 +1,5 @@ libraryDependencies += "org.apache.commons" % "commons-lang3" % "3.4" + +libraryDependencies += "com.github.scopt" %% "scopt" % "3.3.0" + +resolvers += Resolver.sonatypeRepo("public") From 9d424368381fa7ac2ed562415af3e17268599826 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Tue, 28 Apr 2015 20:44:57 -0700 Subject: [PATCH 049/347] Adds skeleton code for deriving the delta program --- DeepDiveLogCompiler.scala | 25 +++++++++++++++++++++---- DeepDiveLogDeltaDeriver.scala | 6 ++++++ 2 files changed, 27 insertions(+), 4 deletions(-) create mode 100644 DeepDiveLogDeltaDeriver.scala diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 04cff9d17..ad960b49e 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -448,28 +448,43 @@ object 
DeepDiveLogCompiler extends DeepDiveLogHandler { // entry point for compilation override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { + // determine the program to compile + val programToCompile = + // derive and compile the program with delta rules instead for incremental version + if (config.isIncremental) DeepDiveLogDeltaDeriver.derive(parsedProgram) + else parsedProgram + // take an initial pass to analyze the parsed program - val state = new CompilationState( parsedProgram ) + val state = new CompilationState( programToCompile ) // compile the program into blocks of application.conf val blocks = ( compileUserSettings ::: - compileVariableSchema(parsedProgram, state) + compileVariableSchema(programToCompile, state) ::: - (parsedProgram flatMap (_.compile(state))) + (programToCompile flatMap (_.compile(state))) ) // emit the generated code blocks foreach println + + if (config.isIncremental) { + // TODO emit extra extractor for moving rows of dd_delta_* to * + } } } // Pretty printer that simply prints the parsed input object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { + val programToPrint = + // derive the delta rules for incremental version + if (config.isIncremental) DeepDiveLogDeltaDeriver.derive(parsedProgram) + else parsedProgram + // TODO pretty print in original syntax - println(parsedProgram) + println(programToPrint) } } @@ -480,11 +495,13 @@ object DeepDiveLog { case class Config ( handler: DeepDiveLogHandler = null , inputFiles: List[String] = List() + , isIncremental: Boolean = false ) val parser = new scopt.OptionParser[Config]("ddlogc") { head("ddlogc", "0.0.1") cmd("compile") required() action { (_, c) => c.copy(handler = DeepDiveLogCompiler) } cmd("print") required() action { (_, c) => c.copy(handler = DeepDiveLogPrettyPrinter) } + opt[Unit]('i', "incremental") optional() action { (_, c) => c.copy(isIncremental = true) } text("Whether to derive delta rules") arg[String]("FILE...") unbounded() required() action { (f, c) => c.copy(inputFiles = c.inputFiles ++ List(f)) } text("Input DDLog programs files") checkConfig { c => if (c.handler == null) failure("No command specified") diff --git a/DeepDiveLogDeltaDeriver.scala b/DeepDiveLogDeltaDeriver.scala new file mode 100644 index 000000000..cb3616e50 --- /dev/null +++ b/DeepDiveLogDeltaDeriver.scala @@ -0,0 +1,6 @@ +object DeepDiveLogDeltaDeriver { + def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = { + // TODO derive delta relations and delta rules + program + } +} From 0682d2642ee43a01207990642598f24b98d719a6 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Tue, 28 Apr 2015 20:54:02 -0700 Subject: [PATCH 050/347] Splits DeepDiveLogCompiler.scala into several pieces --- DeepDiveLog.scala | 56 +++++++++++++++++++++++++++ DeepDiveLogCompiler.scala | 70 ---------------------------------- DeepDiveLogPrettyPrinter.scala | 12 ++++++ 3 files changed, 68 insertions(+), 70 deletions(-) create mode 100644 DeepDiveLog.scala create mode 100644 DeepDiveLogPrettyPrinter.scala diff --git a/DeepDiveLog.scala b/DeepDiveLog.scala new file mode 100644 index 000000000..b524a4846 --- /dev/null +++ b/DeepDiveLog.scala @@ -0,0 +1,56 @@ +// A command-line interface +object DeepDiveLog { + type Program = List[Statement] + + case class Config + ( handler: DeepDiveLogHandler = null + , inputFiles: List[String] = List() + , isIncremental: Boolean = false + ) + val parser = new scopt.OptionParser[Config]("ddlogc") { 
+ head("ddlogc", "0.0.1") + cmd("compile") required() action { (_, c) => c.copy(handler = DeepDiveLogCompiler) } + cmd("print") required() action { (_, c) => c.copy(handler = DeepDiveLogPrettyPrinter) } + opt[Unit]('i', "incremental") optional() action { (_, c) => c.copy(isIncremental = true) } text("Whether to derive delta rules") + arg[String]("FILE...") unbounded() required() action { (f, c) => c.copy(inputFiles = c.inputFiles ++ List(f)) } text("Input DDLog programs files") + checkConfig { c => + if (c.handler == null) failure("No command specified") + else success + } + } + + def main(args: Array[String]) = { + parser.parse(args, Config()) match { + case Some(config) => + config.handler.run(config) + case None => + System.err.println("[error] ") + } + } +} + +// An abstraction of DeepDiveLog handlers +trait DeepDiveLogHandler { + def run(program: DeepDiveLog.Program, config: DeepDiveLog.Config): Unit + + def run(config: DeepDiveLog.Config): Unit = try { + // parse each file into a single program + val parsedProgram = parseFiles(config.inputFiles) + + // run handler with the parsed program + run(parsedProgram, config) + } catch { + case e: RuntimeException => die(e.getMessage) + } + + def parseFiles(fileNames: List[String]): DeepDiveLog.Program = { + val ddlParser = new ConjunctiveQueryParser + fileNames flatMap { ddlParser.parseProgramFile(_) } + } + + def die(message: String = null) = { + if (message != null) + System.err.println("[error] " + message) + System.exit(1) + } +} diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index ad960b49e..256825910 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -382,32 +382,6 @@ case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervisi } } -// An abstraction of DeepDiveLog handlers -trait DeepDiveLogHandler { - def run(program: DeepDiveLog.Program, config: DeepDiveLog.Config): Unit - - def run(config: DeepDiveLog.Config): Unit = try { - // parse each file into a single program - val parsedProgram = parseFiles(config.inputFiles) - - // run handler with the parsed program - run(parsedProgram, config) - } catch { - case e: RuntimeException => die(e.getMessage) - } - - def parseFiles(fileNames: List[String]): DeepDiveLog.Program = { - val ddlParser = new ConjunctiveQueryParser - fileNames flatMap { ddlParser.parseProgramFile(_) } - } - - def die(message: String = null) = { - if (message != null) - System.err.println("[error] " + message) - System.exit(1) - } -} - // Compiler that wires up everything together object DeepDiveLogCompiler extends DeepDiveLogHandler { type CompiledBlock = String @@ -474,47 +448,3 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } } } - -// Pretty printer that simply prints the parsed input -object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { - override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { - val programToPrint = - // derive the delta rules for incremental version - if (config.isIncremental) DeepDiveLogDeltaDeriver.derive(parsedProgram) - else parsedProgram - - // TODO pretty print in original syntax - println(programToPrint) - } -} - -// A command-line interface -object DeepDiveLog { - type Program = List[Statement] - - case class Config - ( handler: DeepDiveLogHandler = null - , inputFiles: List[String] = List() - , isIncremental: Boolean = false - ) - val parser = new scopt.OptionParser[Config]("ddlogc") { - head("ddlogc", "0.0.1") - cmd("compile") required() action { (_, c) => c.copy(handler = 
DeepDiveLogCompiler) } - cmd("print") required() action { (_, c) => c.copy(handler = DeepDiveLogPrettyPrinter) } - opt[Unit]('i', "incremental") optional() action { (_, c) => c.copy(isIncremental = true) } text("Whether to derive delta rules") - arg[String]("FILE...") unbounded() required() action { (f, c) => c.copy(inputFiles = c.inputFiles ++ List(f)) } text("Input DDLog programs files") - checkConfig { c => - if (c.handler == null) failure("No command specified") - else success - } - } - - def main(args: Array[String]) = { - parser.parse(args, Config()) match { - case Some(config) => - config.handler.run(config) - case None => - System.err.println("[error] ") - } - } -} diff --git a/DeepDiveLogPrettyPrinter.scala b/DeepDiveLogPrettyPrinter.scala new file mode 100644 index 000000000..d6429a060 --- /dev/null +++ b/DeepDiveLogPrettyPrinter.scala @@ -0,0 +1,12 @@ +// Pretty printer that simply prints the parsed input +object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { + override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { + val programToPrint = + // derive the delta rules for incremental version + if (config.isIncremental) DeepDiveLogDeltaDeriver.derive(parsedProgram) + else parsedProgram + + // TODO pretty print in original syntax + println(programToPrint) + } +} From fdb579f92b742b12383186ac33aca7b56cdf79dd Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Tue, 28 Apr 2015 22:47:14 -0700 Subject: [PATCH 051/347] Reverts visitor pattern refactor and moves the Statement classes back to DeepDiveLogParser, but retaining the compile() functions under DeepDiveLogCompiler. --- DeepDiveLogCompiler.scala | 100 ++++++++++++++++++-------------------- DeepDiveLogParser.scala | 7 +++ 2 files changed, 54 insertions(+), 53 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 256825910..d03bfe431 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -229,24 +229,26 @@ class QuerySchema(q : ConjunctiveQuery) { } -// Statements that will be parsed and compiled -// Statement-local compilation logic is kept in each case class' compile() method. -// Any global compilation logic should be kept in DeepDiveLogCompiler object. -trait Statement { - type CompiledBlocks = DeepDiveLogCompiler.CompiledBlocks - def compile(state: CompilationState): CompiledBlocks = List() -} -case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statement // atom and whether this is a query relation. 
-case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration]) extends Statement -case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule -{ +// Compiler that takes parsed program as input and prints blocks of application.conf +object DeepDiveLogCompiler extends DeepDiveLogHandler { + type CompiledBlock = String + type CompiledBlocks = List[CompiledBlock] + + // Dispatch to the corresponding compile function + def compile(stmt: Statement, ss: CompilationState): CompiledBlocks = stmt match { + case s: ExtractionRule => compile(s, ss) + case s: FunctionCallRule => compile(s, ss) + case s: InferenceRule => compile(s, ss) + case _ => List() // defaults to compiling into empty block + } + // Generate extraction rule part for deepdive - override def compile(ss: CompilationState): CompiledBlocks = { + def compile(stmt: ExtractionRule, ss: CompilationState): CompiledBlocks = { // Generate the body of the query. - val qs = new QuerySchema( q ) + val qs = new QuerySchema( stmt.q ) // variable columns - val variableCols = q.head.terms flatMap { - case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, q, true) + val variableCols = stmt.q.head.terms flatMap { + case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, stmt.q, true) } val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None @@ -255,29 +257,27 @@ case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction val inputQuery = s""" SELECT ${selectStr} - ${ ss.generateSQLBody(q) }""" + ${ ss.generateSQLBody(stmt.q) }""" - val blockName = ss.resolveExtractorBlockName(this) + val blockName = ss.resolveExtractorBlockName(stmt) val extractor = s""" deepdive.extraction.extractors.${blockName} { - sql: \"\"\" DROP VIEW IF EXISTS ${q.head.name}; - CREATE VIEW ${q.head.name} AS ${inputQuery} + sql: \"\"\" DROP VIEW IF EXISTS ${stmt.q.head.name}; + CREATE VIEW ${stmt.q.head.name} AS ${inputQuery} \"\"\" style: "sql_extractor" - ${ss.generateDependenciesOfCompiledBlockFor(this)} + ${ss.generateDependenciesOfCompiledBlockFor(stmt)} } """ List(extractor) } -} -case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule -{ - override def compile(ss: CompilationState): CompiledBlocks = { + + def compile(stmt: FunctionCallRule, ss: CompilationState): CompiledBlocks = { val inputQuery = s""" - SELECT * FROM ${input} + SELECT * FROM ${stmt.input} """ - val function = ss.resolveFunctionName(this.function) + val function = ss.resolveFunctionName(stmt.function) val udfDetails = (function.implementations collectFirst { case impl: RowWiseLineHandler => s"""udf: \"${StringEscapeUtils.escapeJava(impl.command)}\" @@ -285,28 +285,26 @@ case class FunctionCallRule(input : String, output : String, function : String) }) if (udfDetails.isEmpty) - ss.error(s"Cannot find compilable implementation for function ${this.function} among:\n " + ss.error(s"Cannot find compilable implementation for function ${stmt.function} among:\n " + (function.implementations mkString "\n ")) - val blockName = ss.resolveExtractorBlockName(this) + val blockName = ss.resolveExtractorBlockName(stmt) val extractor = s""" deepdive.extraction.extractors.${blockName} { - input: \"\"\" SELECT * FROM ${input} + input: \"\"\" SELECT * FROM ${stmt.input} \"\"\" - output_relation: \"${output}\" + output_relation: \"${stmt.output}\" ${udfDetails.get} - 
${ss.generateDependenciesOfCompiledBlockFor(this)} + ${ss.generateDependenciesOfCompiledBlockFor(stmt)} } """ List(extractor) } -} -case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String) extends Statement // Weighted rule -{ + // generate inference rule part for deepdive - override def compile(ss: CompilationState): CompiledBlocks = { + def compile(stmt: InferenceRule, ss: CompilationState): CompiledBlocks = { var blocks = List[String]() - val qs = new QuerySchema( q ) + val qs = new QuerySchema( stmt.q ) // node query // generate the node portion (V) of the factor graph @@ -334,20 +332,20 @@ case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervisi """ List(ext) } - if (ss.isQueryTerm(q.head.name)) - blocks :::= compileNodeRule(this, qs, ss) + if (ss.isQueryTerm(stmt.q.head.name)) + blocks :::= compileNodeRule(stmt, qs, ss) // edge query - val fakeBody = q.head +: q.body - val fakeCQ = ConjunctiveQuery(q.head, fakeBody) // we will just use the fakeBody below. + val fakeBody = stmt.q.head +: stmt.q.body + val fakeCQ = ConjunctiveQuery(stmt.q.head, fakeBody) // we will just use the fakeBody below. - val index = q.body.length + 1 + val index = stmt.q.body.length + 1 val qs2 = new QuerySchema( fakeCQ ) - val variableIdsStr = Some(s"""R0.id AS "${q.head.name}.R0.id" """) - val variableColsStr = Some(s"""R0.label AS "${q.head.name}.R0.label" """) + val variableIdsStr = Some(s"""R0.id AS "${stmt.q.head.name}.R0.id" """) + val variableColsStr = Some(s"""R0.label AS "${stmt.q.head.name}.R0.label" """) // weight string - val uwStr = weights match { + val uwStr = stmt.weights match { case KnownFactorWeight(x) => None case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, true)).mkString(", ")) } @@ -360,10 +358,10 @@ case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervisi ${ ss.generateSQLBody(fakeCQ) }""" // factor function - val func = s"""Imply(${q.head.name}.R0.label)""" + val func = s"""Imply(${stmt.q.head.name}.R0.label)""" // weight - val weight = weights match { + val weight = stmt.weights match { case KnownFactorWeight(x) => s"${x}" case UnknownFactorWeight(w) => { s"""?(${w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, false)).mkString(", ")})""" @@ -371,7 +369,7 @@ case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervisi } blocks ::= s""" - deepdive.inference.factors.factor_${q.head.name} { + deepdive.inference.factors.factor_${stmt.q.head.name} { input_query: \"\"\"${inputQuery}\"\"\" function: "${func}" weight: "${weight}" @@ -380,12 +378,7 @@ case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervisi blocks.reverse } -} -// Compiler that wires up everything together -object DeepDiveLogCompiler extends DeepDiveLogHandler { - type CompiledBlock = String - type CompiledBlocks = List[CompiledBlock] def compileUserSettings(): CompiledBlocks = { // TODO read user's proto-application.conf and augment it @@ -420,6 +413,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { List(ddSchema) } + // entry point for compilation override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { // determine the program to compile @@ -437,7 +431,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { ::: compileVariableSchema(programToCompile, state) ::: - (programToCompile flatMap (_.compile(state))) + (programToCompile flatMap {compile(_, state)}) ) // emit the generated code diff --git a/DeepDiveLogParser.scala 
b/DeepDiveLogParser.scala index 46c518bb6..7c3fb8a12 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -30,6 +30,13 @@ case class RelationTypeAlias(likeRelationName: String) extends RelationType trait FunctionImplementationDeclaration case class RowWiseLineHandler(format: String, command: String) extends FunctionImplementationDeclaration +// Statements that will be parsed and compiled +trait Statement +case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statement // atom and whether this is a query relation. +case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration]) extends Statement +case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule +case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule +case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String) extends Statement // Weighted rule // Parser class ConjunctiveQueryParser extends JavaTokenParsers { From 284185dd8b1a6e30e3f486590bf5692dfacfd5c8 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Tue, 28 Apr 2015 22:49:32 -0700 Subject: [PATCH 052/347] Renames to DeepDiveLogParser --- DeepDiveLog.scala | 2 +- DeepDiveLogParser.scala | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/DeepDiveLog.scala b/DeepDiveLog.scala index b524a4846..859128685 100644 --- a/DeepDiveLog.scala +++ b/DeepDiveLog.scala @@ -44,7 +44,7 @@ trait DeepDiveLogHandler { } def parseFiles(fileNames: List[String]): DeepDiveLog.Program = { - val ddlParser = new ConjunctiveQueryParser + val ddlParser = new DeepDiveLogParser fileNames flatMap { ddlParser.parseProgramFile(_) } } diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala index 7c3fb8a12..963e585ec 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -38,8 +38,9 @@ case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String) extends Statement // Weighted rule + // Parser -class ConjunctiveQueryParser extends JavaTokenParsers { +class DeepDiveLogParser extends JavaTokenParsers { // JavaTokenParsers provides several useful number parsers: // wholeNumber, decimalNumber, floatingPointNumber @@ -157,7 +158,7 @@ class ConjunctiveQueryParser extends JavaTokenParsers { | functionDeclaration | functionCallRule ) - def program : Parser[List[Statement]] = phrase(rep1(statement <~ ".")) + def program : Parser[DeepDiveLog.Program] = phrase(rep1(statement <~ ".")) def parseProgram(inputProgram: CharSequence, fileName: Option[String] = None): List[Statement] = { parse(program, inputProgram) match { From f7ee393f1452fb1f9f1c50c0bd3d9b3e7b2dd303 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Tue, 28 Apr 2015 23:50:48 -0700 Subject: [PATCH 053/347] Migrates tests to Bats for easy addition which stands for Bash Automated Testing System. 
--- .gitignore | 6 ++-- Makefile | 31 +++++++++++++------ README.md | 4 +-- ...pected => spouse_example.compile.expected} | 0 test/compile-example.bats | 19 ++++++++++++ test/test.sh | 26 ++++++++++++++++ 6 files changed, 72 insertions(+), 14 deletions(-) rename examples/{spouse_example.expected => spouse_example.compile.expected} (100%) create mode 100755 test/compile-example.bats create mode 100755 test/test.sh diff --git a/.gitignore b/.gitignore index a57fd1549..8ec9fb651 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ -/ddlc.jar -/ddlc-test.jar +/ddlog.jar +/ddlog-test.jar +/ddlog-test.jar.classpath target +/test/bats diff --git a/Makefile b/Makefile index 5d046e928..307062664 100644 --- a/Makefile +++ b/Makefile @@ -1,24 +1,35 @@ # Makefile for DeepDiveLogCompiler -TESTJAR = ddlc-test.jar -TEST = examples/spouse_example.ddl +JAR = ddlog.jar +TEST_JAR = ddlog-test.jar +TEST_CLASSPATH_CACHE = ddlog-test.jar.classpath -test: $(TESTJAR) - CLASSPATH=$(shell sbt "export compile:dependency-classpath" | tail -1) \ - scala $< compile $(TEST) | diff -u $(TEST:.ddl=.expected) - -$(TESTJAR): $(wildcard *.scala) +# test +.PHONY: test +test: $(TEST_JAR) $(TEST_CLASSPATH_CACHE) + DDLOG_JAR=$(realpath $<) \ +CLASSPATH=$(shell cat $(TEST_CLASSPATH_CACHE)) \ +test/test.sh +$(TEST_CLASSPATH_CACHE): build.sbt $(wildcard project/*.sbt) + sbt "export compile:dependency-classpath" | tail -1 >$@ + +# test standalone package +.PHONY: test-package +test-package: $(JAR) + $(MAKE) test TEST_JAR=$< + +# build test jar +$(TEST_JAR): $(wildcard *.scala) sbt package ln -sfn $(shell ls -t target/scala-*/*_*.jar | head -1) $@ touch $@ -# standalone jar -JAR = ddlc.jar -test-package: $(JAR) - scala $< compile $(TEST) | diff -u $(TEST:.ddl=.expected) - +# build standalone jar $(JAR): $(wildcard *.scala) sbt assembly ln -sfn $(shell ls -t target/scala-*/*-assembly-*.jar | head -1) $@ touch $@ +.PHONY: clean clean: sbt clean diff --git a/README.md b/README.md index 5f4807a65..25a0459f7 100644 --- a/README.md +++ b/README.md @@ -13,13 +13,13 @@ make The following command produces a standalone jar. ```bash -make ddlc.jar +make ddlog.jar ``` ## Running The following will generate an application.conf for the [spouse example in DeepDive's tutorial](http://deepdive.stanford.edu/doc/basics/walkthrough/walkthrough.html). 
```bash mkdir -p examples/spouse_example -java -jar ddlc.jar examples/spouse_example.ddl >examples/spouse_example/application.conf +java -jar ddlog.jar examples/spouse_example.ddl >examples/spouse_example/application.conf ``` diff --git a/examples/spouse_example.expected b/examples/spouse_example.compile.expected similarity index 100% rename from examples/spouse_example.expected rename to examples/spouse_example.compile.expected diff --git a/test/compile-example.bats b/test/compile-example.bats new file mode 100755 index 000000000..ed88f6bca --- /dev/null +++ b/test/compile-example.bats @@ -0,0 +1,19 @@ +#!/usr/bin/env bats + +# required variables +: ${DDLOG_JAR:?path to ddlog jar} +: ${EXAMPLE:?path to input ddlog program} +EXAMPLE_NAME=${EXAMPLE%.ddl} +EXAMPLE_NAME=${EXAMPLE_NAME##*/} + +setup() { + [ -e "$DDLOG_JAR" ] + [ -e "$EXAMPLE" ] +} + +@test "compile $EXAMPLE_NAME" { + expectedOutput=${EXAMPLE%.ddl}.compile.expected + [ -e "$expectedOutput" ] || skip + scala "$DDLOG_JAR" compile "$EXAMPLE" | + diff -u "$expectedOutput" - +} diff --git a/test/test.sh b/test/test.sh new file mode 100755 index 000000000..722bed8da --- /dev/null +++ b/test/test.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +set -eu +cd "$(dirname "$0")" + +# make sure bats is available +PATH="$PWD/bats/bin:$PATH" +type bats &>/dev/null || + git clone https://github.com/sstephenson/bats.git + +# run all .bats tests +c=0 +for t in *.bats; do + case $t in + *-example.bats) + # run bats test for every example + for ddl in ../examples/*.ddl; do + EXAMPLE=$ddl bats $t || c=$? + done + ;; + + *) + # otherwise, simply run the bats + bats $t + esac +done +exit $c From 1d784fbe69523417471e6202eec01be65138fd1a Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Tue, 28 Apr 2015 23:52:54 -0700 Subject: [PATCH 054/347] Fixes glitches in symlinking --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 307062664..9f6d4a8f7 100644 --- a/Makefile +++ b/Makefile @@ -21,13 +21,13 @@ test-package: $(JAR) # build test jar $(TEST_JAR): $(wildcard *.scala) sbt package - ln -sfn $(shell ls -t target/scala-*/*_*.jar | head -1) $@ + ln -sfn $$(ls -t target/scala-*/*_*.jar | head -1) $@ touch $@ # build standalone jar $(JAR): $(wildcard *.scala) sbt assembly - ln -sfn $(shell ls -t target/scala-*/*-assembly-*.jar | head -1) $@ + ln -sfn $$(ls -t target/scala-*/*-assembly-*.jar | head -1) $@ touch $@ .PHONY: clean From af7ad6f2b2983a54c21d9089f296d96c97803492 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Tue, 28 Apr 2015 23:56:26 -0700 Subject: [PATCH 055/347] Updates README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 25a0459f7..9c4866568 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,6 @@ make ddlog.jar The following will generate an application.conf for the [spouse example in DeepDive's tutorial](http://deepdive.stanford.edu/doc/basics/walkthrough/walkthrough.html). 
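The "like" bug is a consequence of ordered choice: `|` in Scala's parser combinators commits to the first alternative that succeeds, so while `rep1sep(columnDeclaration, ",")` was tried first, an input such as `like foo` parsed as a single column named `like` of type `foo` rather than as a relation alias. A minimal sketch of the failure mode and the fix (hypothetical demo code, not from this repository):

```scala
import scala.util.parsing.combinator.JavaTokenParsers

// Hypothetical demo, not code from this repository.
object OrderedChoiceDemo extends JavaTokenParsers {
  sealed trait Ty
  case class Alias(rel: String) extends Ty
  case class Cols(cols: List[(String, String)]) extends Ty

  // a column declaration is "name type", both plain identifiers here
  def col: Parser[(String, String)] = ident ~ ident ^^ { case n ~ t => (n, t) }

  // buggy order: "like foo" is swallowed as the column (like, foo)
  def buggy: Parser[Ty] =
    ( rep1sep(col, ",") ^^ { Cols(_) }
    | "like" ~> ident   ^^ { Alias(_) }
    )

  // fixed order, as in this patch: try the more specific alias form first
  def fixed: Parser[Ty] =
    ( "like" ~> ident   ^^ { Alias(_) }
    | rep1sep(col, ",") ^^ { Cols(_) }
    )

  def main(args: Array[String]): Unit = {
    println(parseAll(buggy, "like foo")) // Cols(List((like,foo)))
    println(parseAll(fixed, "like foo")) // Alias(foo)
  }
}
```

Putting the more specific `"like" ~> relationName` alternative first, as this patch does, resolves the ambiguity without any backtracking machinery.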
```bash mkdir -p examples/spouse_example -java -jar ddlog.jar examples/spouse_example.ddl >examples/spouse_example/application.conf +java -jar ddlog.jar compile examples/spouse_example.ddl >examples/spouse_example/application.conf ``` From 21fe888e821d6d0d749563af7b02067a93a48deb Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Wed, 29 Apr 2015 00:00:30 -0700 Subject: [PATCH 056/347] Adds a short description about the tests --- test/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 test/README.md diff --git a/test/README.md b/test/README.md new file mode 100644 index 000000000..a11b0e734 --- /dev/null +++ b/test/README.md @@ -0,0 +1,8 @@ +DDLog tests +=========== + +`test.sh` runs all tests. + +[Bats](https://github.com/sstephenson/bats.git) is used for most end-to-end tests. +Every `*-example.bats` will be run for each .ddl example under `../examples/`, with the path to the .ddl file set in the `EXAMPLE` environment variable. +Other .bats tests will run once. From f00933025ccd557748789bded68259d88811db57 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Wed, 29 Apr 2015 00:03:30 -0700 Subject: [PATCH 057/347] Clarifies test/README --- test/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/README.md b/test/README.md index a11b0e734..84d2e324d 100644 --- a/test/README.md +++ b/test/README.md @@ -4,5 +4,5 @@ DDLog tests `test.sh` runs all tests. [Bats](https://github.com/sstephenson/bats.git) is used for most end-to-end tests. -Every `*-example.bats` will be run for each .ddl example under `../examples/`, with the path to the .ddl file set in the `EXAMPLE` environment variable. -Other .bats tests will run once. +Every `*-example.bats` will be run for each `.ddl` example under `../examples/`, with the `EXAMPLE` environment variable set to the path to the example. +Rest of the `.bats` tests will run once. From 386534b794896eead93eaef824929193ece27826 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Wed, 29 Apr 2015 00:07:18 -0700 Subject: [PATCH 058/347] Adds comments to the .bats file --- test/compile-example.bats | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/compile-example.bats b/test/compile-example.bats index ed88f6bca..13944c2b0 100755 --- a/test/compile-example.bats +++ b/test/compile-example.bats @@ -1,4 +1,8 @@ #!/usr/bin/env bats +# Per example compilation test +# +# This test compares the `ddlog compile` output of a .ddl file with its expected output in .compile.expected file. +# Test is skipped if no expected output is there. # required variables : ${DDLOG_JAR:?path to ddlog jar} @@ -6,11 +10,13 @@ EXAMPLE_NAME=${EXAMPLE%.ddl} EXAMPLE_NAME=${EXAMPLE_NAME##*/} +# some preconditions setup() { [ -e "$DDLOG_JAR" ] [ -e "$EXAMPLE" ] } +# actual test that compares the compiled output @test "compile $EXAMPLE_NAME" { expectedOutput=${EXAMPLE%.ddl}.compile.expected [ -e "$expectedOutput" ] || skip From 5cd4608640970ca3e639b47c14219251a0122132 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Wed, 29 Apr 2015 01:44:16 -0700 Subject: [PATCH 059/347] Adds pretty printer implementation - Renames the test to per-example.bats for testing both compilation and pretty printing. - Also fixes a bug in the grammar parsing "like". 
--- DeepDiveLogParser.scala | 4 +- DeepDiveLogPrettyPrinter.scala | 81 ++++++++++++++++++++++- examples/spouse_example.print.expected | 89 ++++++++++++++++++++++++++ test/compile-example.bats | 25 -------- test/per-example.bats | 46 +++++++++++++ 5 files changed, 216 insertions(+), 29 deletions(-) create mode 100644 examples/spouse_example.print.expected delete mode 100755 test/compile-example.bats create mode 100755 test/per-example.bats diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala index 963e585ec..d48732625 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -103,10 +103,10 @@ class DeepDiveLogParser extends JavaTokenParsers { } def relationType: Parser[RelationType] = - ( rep1sep(columnDeclaration, ",") ^^ { + ( "like" ~> relationName ^^ { RelationTypeAlias(_) } + | rep1sep(columnDeclaration, ",") ^^ { attrs => RelationTypeDeclaration(attrs map { _.name }, attrs map { _.t }) } - | "like" ~> relationName ^^ { RelationTypeAlias(_) } ) def functionImplementation : Parser[FunctionImplementationDeclaration] = diff --git a/DeepDiveLogPrettyPrinter.scala b/DeepDiveLogPrettyPrinter.scala index d6429a060..1bfe0fe8e 100644 --- a/DeepDiveLogPrettyPrinter.scala +++ b/DeepDiveLogPrettyPrinter.scala @@ -1,12 +1,89 @@ +import org.apache.commons.lang3.StringEscapeUtils + // Pretty printer that simply prints the parsed input object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { + + // Dispatch to the corresponding function + def print(stmt: Statement): String = stmt match { + case s: SchemaDeclaration => print(s) + case s: FunctionDeclaration => print(s) + case s: ExtractionRule => print(s) + case s: FunctionCallRule => print(s) + case s: InferenceRule => print(s) + } + + def print(stmt: SchemaDeclaration): String = { + val columnDecls = stmt.a.terms map { + case Variable(name, _, i) => s"${name} ${stmt.a.types(i)}" + } + val prefix = s"${stmt.a.name}${if (stmt.isQuery) "?" else ""}(" + val indentation = " " * prefix.length + s"""${prefix}${columnDecls.mkString(",\n" + indentation)}). + |""".stripMargin + } + + def print(relationType: RelationType): String = relationType match { + case ty: RelationTypeAlias => s"like ${ty.likeRelationName}" + case ty: RelationTypeDeclaration => + val namesWithTypes = (ty.names, ty.types).zipped map { + (colName,colType) => s"${colName} ${colType}"} + s"(${namesWithTypes.mkString(", ")})" + } + def print(stmt: FunctionDeclaration): String = { + val inputType = print(stmt.inputType) + val outputType = print(stmt.outputType) + val impls = stmt.implementations map { + case impl: RowWiseLineHandler => + "\"" + StringEscapeUtils.escapeJava(impl.command) + "\"" + + s"\n handles ${impl.format} lines" + } + s"""function ${stmt.functionName} + | over ${inputType} + | returns ${outputType} + | ${(impls map {"implementation " + _}).mkString("\n ")}. + |""".stripMargin + } + + def print(cq: ConjunctiveQuery): String = { + val printAtom = {a:Atom => + val vars = a.terms map { _.varName } + s"${a.name}(${vars.mkString(", ")})" + } + s"""${printAtom(cq.head)} :- + | ${(cq.body map printAtom).mkString(",\n ")}""".stripMargin + } + + def print(stmt: ExtractionRule): String = { + s"""${print(stmt.q)}. + |""".stripMargin + } + + def print(stmt: FunctionCallRule): String = { + s"""${stmt.output} :- !${stmt.function}(${stmt.input}). 
+ |""".stripMargin + } + + def print(stmt: InferenceRule): String = { + print(stmt.q) + + ( if (stmt.weights == null) "" + else "\n weight = " + (stmt.weights match { + case KnownFactorWeight(w) => w.toString + case UnknownFactorWeight(vs) => vs.mkString(", ") + }) + ) + + ( if (stmt.supervision == null) "" + else "\n label = " + stmt.supervision + ) + + "." + } + override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { val programToPrint = // derive the delta rules for incremental version if (config.isIncremental) DeepDiveLogDeltaDeriver.derive(parsedProgram) else parsedProgram - // TODO pretty print in original syntax - println(programToPrint) + // pretty print in original syntax + programToPrint foreach {stmt => println(print(stmt))} } } diff --git a/examples/spouse_example.print.expected b/examples/spouse_example.print.expected new file mode 100644 index 000000000..6787de76d --- /dev/null +++ b/examples/spouse_example.print.expected @@ -0,0 +1,89 @@ +articles(article_id text, + text text). + +sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text). + +people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text). + +has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text). + +has_spouse_features(relation_id text, + feature text). + +has_spouse?(relation_id text). + +people_mentions :- !ext_people(ext_people_input). + +ext_people_input(sentence_id text, + words text[], + ner_tags text[]). + +ext_people_input(s, words, ner_tags) :- + sentences(a, b, words, c, d, e, ner_tags, f, s). + +function ext_people + over like ext_people_input + returns like people_mentions + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + handles tsv lines. + +has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input). + +ext_has_spouse_input(sentence_id text, + p1_id text, + p1_text text, + p2_id text, + p2_text text). + +ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + people_mentions(s, a, b, p1_text, p1_id), + people_mentions(s, c, d, p2_text, p2_id). + +function ext_has_spouse + over like ext_has_spouse_input + returns like has_spouse_candidates + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + handles tsv lines. + +has_spouse_features :- !ext_has_spouse_features(ext_has_spouse_features_input). + +ext_has_spouse_features_input(words text[], + relation_id text, + p1_start_position int, + p1_length int, + p2_start_position int, + p2_length int). + +ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +function ext_has_spouse_features + over like ext_has_spouse_features_input + returns like has_spouse_features + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + handles tsv lines. + +has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), + has_spouse_features(rid, f) + weight = f + label = l. 
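[Editor's note] The aligned two-column declarations in `spouse_example.print.expected` fall out of two small idioms in `print(stmt: SchemaDeclaration)` above: continuation lines are padded to the width of the `Rel(` prefix, and the template marks its left edge with `|` for `stripMargin`. A minimal runnable sketch of just that formatting trick (`StripMarginDemo`, `Column`, and `printSchema` are illustrative names, not part of the codebase):

```scala
object StripMarginDemo {
  case class Column(name: String, ty: String)

  // Pad continuation lines to the width of the "Rel(" prefix, and mark the
  // template's left edge with `|` so stripMargin can remove the indentation
  // that keeps the Scala source readable.
  def printSchema(rel: String, cols: Seq[Column], isQuery: Boolean): String = {
    val prefix      = s"$rel${if (isQuery) "?" else ""}("
    val indentation = " " * prefix.length
    val decls       = cols map { c => s"${c.name} ${c.ty}" }
    s"""${prefix}${decls.mkString(",\n" + indentation)}).
       |""".stripMargin
  }

  def main(args: Array[String]): Unit =
    print(printSchema("has_spouse_features", Seq(
      Column("relation_id", "text"),
      Column("feature", "text")), isQuery = false))
  // prints:
  //   has_spouse_features(relation_id text,
  //                       feature text).
  // i.e. the layout seen in the expected file above.
}
```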
diff --git a/test/compile-example.bats b/test/compile-example.bats deleted file mode 100755 index 13944c2b0..000000000 --- a/test/compile-example.bats +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bats -# Per example compilation test -# -# This test compares the `ddlog compile` output of a .ddl file with its expected output in .compile.expected file. -# Test is skipped if no expected output is there. - -# required variables -: ${DDLOG_JAR:?path to ddlog jar} -: ${EXAMPLE:?path to input ddlog program} -EXAMPLE_NAME=${EXAMPLE%.ddl} -EXAMPLE_NAME=${EXAMPLE_NAME##*/} - -# some preconditions -setup() { - [ -e "$DDLOG_JAR" ] - [ -e "$EXAMPLE" ] -} - -# actual test that compares the compiled output -@test "compile $EXAMPLE_NAME" { - expectedOutput=${EXAMPLE%.ddl}.compile.expected - [ -e "$expectedOutput" ] || skip - scala "$DDLOG_JAR" compile "$EXAMPLE" | - diff -u "$expectedOutput" - -} diff --git a/test/per-example.bats b/test/per-example.bats new file mode 100755 index 000000000..b8400dd0b --- /dev/null +++ b/test/per-example.bats @@ -0,0 +1,46 @@ +#!/usr/bin/env bats +# Per example tests +# +# This test compares outputs of various modes of ddlog against a .ddl example file with its expected output. +# Test is skipped if no expected output is found. + +# required variables +: ${DDLOG_JAR:?path to ddlog jar} +: ${EXAMPLE:?path to input ddlog program} +EXAMPLE_BASEPATH=${EXAMPLE%.ddl} +EXAMPLE_NAME=${EXAMPLE_BASEPATH##*/} + +# some preconditions +setup() { + [ -e "$DDLOG_JAR" ] + [ -e "$EXAMPLE" ] +} + +# compare the compiled output with what's expected +@test "compile $EXAMPLE_NAME" { + expectedOutput=$EXAMPLE_BASEPATH.compile.expected + [ -e "$expectedOutput" ] || skip + scala "$DDLOG_JAR" compile "$EXAMPLE" | + diff -u "$expectedOutput" - +} + +# compare the pretty-printed output with the input +@test "print $EXAMPLE_NAME as expected" { + expectedOutput=$EXAMPLE_BASEPATH.print.expected + [ -e "$expectedOutput" ] || skip + scala "$DDLOG_JAR" print "$EXAMPLE" | + diff -u "$expectedOutput" - +} + +# check if print is idempotent +@test "print $EXAMPLE_NAME is idempotent" { + printed=$BATS_TMPDIR/ddlog-$EXAMPLE_NAME-printed.ddl + scala "$DDLOG_JAR" print "$EXAMPLE" >"$printed" || skip + scala "$DDLOG_JAR" print "$printed" | + diff -u "$printed" - +} + + +# TODO incremental print + +# TODO incremental compile From 1734d4b6fb33c88c701d659d61d5f6cce5cc4104 Mon Sep 17 00:00:00 2001 From: senwu Date: Thu, 30 Apr 2015 22:22:36 -0700 Subject: [PATCH 060/347] add function: derive delta relations and delta rules --- DeepDiveLogDeltaDeriver.scala | 131 +++++++++++++++++++++++++++++++++- 1 file changed, 128 insertions(+), 3 deletions(-) diff --git a/DeepDiveLogDeltaDeriver.scala b/DeepDiveLogDeltaDeriver.scala index cb3616e50..7be0eb238 100644 --- a/DeepDiveLogDeltaDeriver.scala +++ b/DeepDiveLogDeltaDeriver.scala @@ -1,6 +1,131 @@ -object DeepDiveLogDeltaDeriver { +import scala.collection.mutable.ListBuffer + +object DeepDiveLogDeltaDeriver{ + + val deltaPrefix = "dd_delta_" + + def transfer(stmt: Statement): List[Statement] = stmt match { + case s: SchemaDeclaration => transfer(s) + case s: FunctionDeclaration => transfer(s) + case s: ExtractionRule => transfer(s) + case s: FunctionCallRule => transfer(s) + case s: InferenceRule => transfer(s) + } + + def transfer(stmt: SchemaDeclaration): List[Statement] = { + var incrementalStatement = new ListBuffer[Statement]() + incrementalStatement += stmt + var newTerms = new ListBuffer[Variable]() + for (term <- stmt.a.terms) { + newTerms += Variable(term.varName, 
deltaPrefix + term.relName, term.index) + } + incrementalStatement += SchemaDeclaration(Attribute(deltaPrefix + stmt.a.name, newTerms.toList, stmt.a.types), stmt.isQuery) + incrementalStatement.toList + } + + def transfer(stmt: FunctionDeclaration): List[Statement] = { + var incrementalStatement = new ListBuffer[Statement]() + // incrementalStatement += stmt + var newTerms = new ListBuffer[Variable]() + var newInputType: RelationType = stmt.inputType match { + case inTy: RelationTypeDeclaration => { + var newNames = new ListBuffer[String]() + for (name <- inTy.names) + newNames += deltaPrefix + name + RelationTypeDeclaration(newNames.toList, inTy.types) + } + case inTy: RelationTypeAlias => RelationTypeAlias(deltaPrefix + inTy.likeRelationName) + } + var newOutputType: RelationType = stmt.outputType match { + case outTy: RelationTypeDeclaration => { + var newNames = new ListBuffer[String]() + for (name <- outTy.names) + newNames += deltaPrefix + name + RelationTypeDeclaration(newNames.toList, outTy.types) + } + case outTy: RelationTypeAlias => RelationTypeAlias(deltaPrefix + outTy.likeRelationName) + } + incrementalStatement += FunctionDeclaration(stmt.functionName, newInputType, newOutputType, stmt.implementations) + incrementalStatement.toList + } + + def transfer(stmt: ExtractionRule): List[Statement] = { + var incrementalStatement = new ListBuffer[Statement]() + + var newStmtCqHeadTerms = new ListBuffer[Variable]() + for (headTerm <- stmt.q.head.terms) { + newStmtCqHeadTerms += Variable(headTerm.varName, deltaPrefix + headTerm.relName, headTerm.index) + } + var newStmtCqHead = Atom(deltaPrefix + stmt.q.head.name, newStmtCqHeadTerms.toList) + + var deltaStmtCqBody = new ListBuffer[Atom]() + for (stmtCqBody <- stmt.q.body) { // List[Atom] + var stmtCqBodyTerms = new ListBuffer[Variable]() + for (bodyTerm <- stmtCqBody.terms) { + stmtCqBodyTerms += Variable(bodyTerm.varName, deltaPrefix + bodyTerm.relName, bodyTerm.index) + } + deltaStmtCqBody += Atom(deltaPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) + } + + var i = 0 + var j = 0 + for (i <- 1 to ((1 << stmt.q.body.length) - 1)) { + var newStmtCqBody = new ListBuffer[Atom]() + for (j <- 0 to (stmt.q.body.length - 1)) { + if ((i & (1 << j)) == 0) + newStmtCqBody += stmt.q.body(j) + else + newStmtCqBody += deltaStmtCqBody(j) + } + incrementalStatement += ExtractionRule(ConjunctiveQuery(newStmtCqHead, newStmtCqBody.toList)) + } + incrementalStatement.toList + } + + def transfer(stmt: FunctionCallRule): List[Statement] = { + var incrementalStatement = new ListBuffer[Statement]() + incrementalStatement += FunctionCallRule(deltaPrefix + stmt.input, deltaPrefix + stmt.output, stmt.function) + incrementalStatement.toList + } + + def transfer(stmt: InferenceRule): List[Statement] = { + var incrementalStatement = new ListBuffer[Statement]() + + var newStmtCqHeadTerms = new ListBuffer[Variable]() + for (headTerm <- stmt.q.head.terms) { + newStmtCqHeadTerms += Variable(headTerm.varName, deltaPrefix + headTerm.relName, headTerm.index) + } + var newStmtCqHead = Atom(deltaPrefix + stmt.q.head.name, newStmtCqHeadTerms.toList) + + var deltaStmtCqBody = new ListBuffer[Atom]() + for (stmtCqBody <- stmt.q.body) { // List[Atom] + var stmtCqBodyTerms = new ListBuffer[Variable]() + for (bodyTerm <- stmtCqBody.terms) { + stmtCqBodyTerms += Variable(bodyTerm.varName, deltaPrefix + bodyTerm.relName, bodyTerm.index) + } + deltaStmtCqBody += Atom(deltaPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) + } + + var i = 0 + var j = 0 + for (i <- 1 to ((1 << 
stmt.q.body.length) - 1)) { + var newStmtCqBody = new ListBuffer[Atom]() + for (j <- 0 to (stmt.q.body.length - 1)) { + if ((i & (1 << j)) == 0) + newStmtCqBody += stmt.q.body(j) + else + newStmtCqBody += deltaStmtCqBody(j) + } + incrementalStatement += InferenceRule(ConjunctiveQuery(newStmtCqHead, newStmtCqBody.toList), stmt.weights, stmt.supervision) + } + incrementalStatement.toList + } + def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = { - // TODO derive delta relations and delta rules - program + var incrementalProgram = new ListBuffer[Statement]() + for (x <- program) { + incrementalProgram = incrementalProgram ++ transfer(x) + } + incrementalProgram.toList } } From fa91f7f0810c2cba777e062c154cd020507ad95e Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Fri, 1 May 2015 14:54:04 -0700 Subject: [PATCH 061/347] Adds a simple test for DeepDiveLogDeltaDeriver --- examples/rstu.ddl | 15 ++++++++++ examples/rstu.delta.expected | 57 ++++++++++++++++++++++++++++++++++++ test/per-example.bats | 10 +++++-- 3 files changed, 80 insertions(+), 2 deletions(-) create mode 100644 examples/rstu.ddl create mode 100644 examples/rstu.delta.expected diff --git a/examples/rstu.ddl b/examples/rstu.ddl new file mode 100644 index 000000000..9f4abbd2c --- /dev/null +++ b/examples/rstu.ddl @@ -0,0 +1,15 @@ +R?(x text). + +S(x text). + +T(x text, + f text). + +U(x text, + l text). + +R(x) :- + S(x), + T(x, f), + U(x, l). + diff --git a/examples/rstu.delta.expected b/examples/rstu.delta.expected new file mode 100644 index 000000000..7c5ac4359 --- /dev/null +++ b/examples/rstu.delta.expected @@ -0,0 +1,57 @@ +R?(x text). + +dd_delta_R?(x text). + +S(x text). + +dd_delta_S(x text). + +dd_new_S(x text). + +dd_new_S(x) :- + S(x). + +dd_new_S(x) :- + dd_delta_S(x). + +T(x text, + f text). + +dd_delta_T(x text, + f text). + +dd_new_T(x text, + f text). + +dd_new_T(x, f) :- + T(x, f). + +dd_new_T(x, f) :- + dd_delta_T(x, f). + +U(x text, + l text). + +dd_delta_U(x text, + l text). + +R(x) :- + S(x), + T(x, f), + U(x, l). + +dd_delta_R(x) :- + dd_delta_S(x), + T(x, f), + U(x, l). + +dd_delta_R(x) :- + dd_new_S(x), + dd_delta_T(x, f), + U(x, l). + +dd_delta_R(x) :- + dd_new_S(x), + dd_new_T(x, f), + dd_delta_U(x, l). 
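[Editor's note] The `for (i <- 1 to ((1 << stmt.q.body.length) - 1))` loop in commit 060 is incremental view maintenance: the n-atom generalization of Δ(R ⋈ S) = (ΔR ⋈ S) ∪ (R ⋈ ΔS) ∪ (ΔR ⋈ ΔS), with `i` acting as a bitmask over which body atoms take their `dd_delta_` version. A self-contained sketch of the enumeration (`deltaVariants` and `DeltaVariantsDemo` are illustrative names, not part of the codebase):

```scala
object DeltaVariantsDemo {
  // Bit j of i picks the dd_delta_ version of body atom j, and i ranges over
  // every non-zero pattern, so each rule body containing at least one delta
  // atom is emitted exactly once.
  def deltaVariants(body: List[String]): List[List[String]] =
    (1 until (1 << body.length)).toList.map { i =>
      body.zipWithIndex.map { case (atom, j) =>
        if ((i & (1 << j)) == 0) atom else "dd_delta_" + atom
      }
    }

  def main(args: Array[String]): Unit =
    deltaVariants(List("S", "T", "U")).foreach(println)
  // 2^3 - 1 = 7 bodies, from List(dd_delta_S, T, U)
  // up to List(dd_delta_S, dd_delta_T, dd_delta_U)
}
```

For a three-atom body this emits 2^3 - 1 = 7 rule bodies; note that `rstu.delta.expected` above instead anticipates the factored form, where `dd_new_S = S ∪ dd_delta_S` telescopes the union down to one rule per atom.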
+
diff --git a/test/per-example.bats b/test/per-example.bats
index b8400dd0b..888fd32dd 100755
--- a/test/per-example.bats
+++ b/test/per-example.bats
@@ -24,7 +24,7 @@ setup() {
     diff -u "$expectedOutput" -
 }
 
-# compare the pretty-printed output with the input
+# compare the pretty-printed output with what's expected
 @test "print $EXAMPLE_NAME as expected" {
     expectedOutput=$EXAMPLE_BASEPATH.print.expected
     [ -e "$expectedOutput" ] || skip
@@ -41,6 +41,12 @@ setup() {
 }
 
 
-# TODO incremental print
+# compare the pretty-printed incremental output with what's expected
+@test "incremental print $EXAMPLE_NAME as expected" {
+    expectedOutput=$EXAMPLE_BASEPATH.delta.expected
+    [ -e "$expectedOutput" ] || skip
+    scala "$DDLOG_JAR" print --incremental "$EXAMPLE" |
+    diff -u "$expectedOutput" -
+}
 
 # TODO incremental compile
From e1b5b59e87e39f95bbad009d95c1579efcfb34a3 Mon Sep 17 00:00:00 2001
From: senwu
Date: Tue, 5 May 2015 13:15:58 -0700
Subject: [PATCH 062/347] add more comments in DeepDiveLogDeltaDeriver
 function

---
 DeepDiveLogDeltaDeriver.scala | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/DeepDiveLogDeltaDeriver.scala b/DeepDiveLogDeltaDeriver.scala
index 7be0eb238..51f4242f1 100644
--- a/DeepDiveLogDeltaDeriver.scala
+++ b/DeepDiveLogDeltaDeriver.scala
@@ -2,6 +2,7 @@ import scala.collection.mutable.ListBuffer
 
 object DeepDiveLogDeltaDeriver{
 
+  // Default prefix for incremental tables
   val deltaPrefix = "dd_delta_"
 
   def transfer(stmt: Statement): List[Statement] = stmt match {
@@ -12,6 +13,8 @@ object DeepDiveLogDeltaDeriver{
     case s: InferenceRule => transfer(s)
   }
 
+  // Incremental schema declaration,
+  // keep the original schema and create one delta schema
   def transfer(stmt: SchemaDeclaration): List[Statement] = {
     var incrementalStatement = new ListBuffer[Statement]()
     incrementalStatement += stmt
@@ -23,9 +26,10 @@ object DeepDiveLogDeltaDeriver{
     incrementalStatement.toList
   }
 
+  // Incremental function declaration,
+  // create one delta function schema based on the original function schema
   def transfer(stmt: FunctionDeclaration): List[Statement] = {
     var incrementalStatement = new ListBuffer[Statement]()
-    // incrementalStatement += stmt
     var newTerms = new ListBuffer[Variable]()
     var newInputType: RelationType = stmt.inputType match {
       case inTy: RelationTypeDeclaration => {
@@ -49,9 +53,12 @@ object DeepDiveLogDeltaDeriver{
     incrementalStatement.toList
   }
 
+  // Incremental extraction rule,
+  // create delta rules based on the original extraction rule
   def transfer(stmt: ExtractionRule): List[Statement] = {
     var incrementalStatement = new ListBuffer[Statement]()
 
+    // New head
     var newStmtCqHeadTerms = new ListBuffer[Variable]()
     for (headTerm <- stmt.q.head.terms) {
       newStmtCqHeadTerms += Variable(headTerm.varName, deltaPrefix + headTerm.relName, headTerm.index)
     }
@@ -67,6 +74,7 @@ object DeepDiveLogDeltaDeriver{
       deltaStmtCqBody += Atom(deltaPrefix + stmtCqBody.name, stmtCqBodyTerms.toList)
     }
 
+    // New body
     var i = 0
     var j = 0
     for (i <- 1 to ((1 << stmt.q.body.length) - 1)) {
@@ -82,15 +90,20 @@ object DeepDiveLogDeltaDeriver{
     incrementalStatement.toList
   }
 
+  // Incremental function call rule,
+  // modify function input and output
   def transfer(stmt: FunctionCallRule): List[Statement] = {
     var incrementalStatement = new ListBuffer[Statement]()
     incrementalStatement += FunctionCallRule(deltaPrefix + stmt.input, deltaPrefix + stmt.output, stmt.function)
     incrementalStatement.toList
   }
 
+  // Incremental inference rule,
+  // create delta rules based on the original inference rule
   def transfer(stmt: 
InferenceRule): List[Statement] = { var incrementalStatement = new ListBuffer[Statement]() + // New head var newStmtCqHeadTerms = new ListBuffer[Variable]() for (headTerm <- stmt.q.head.terms) { newStmtCqHeadTerms += Variable(headTerm.varName, deltaPrefix + headTerm.relName, headTerm.index) @@ -106,6 +119,7 @@ object DeepDiveLogDeltaDeriver{ deltaStmtCqBody += Atom(deltaPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) } + // New body var i = 0 var j = 0 for (i <- 1 to ((1 << stmt.q.body.length) - 1)) { From 0a5e43742accf414602be23a209930f5bfe543bf Mon Sep 17 00:00:00 2001 From: senwu Date: Tue, 5 May 2015 15:40:04 -0700 Subject: [PATCH 063/347] add dd_count calculation when isIncremental is true --- DeepDiveLogCompiler.scala | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index d03bfe431..3ebac870f 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -235,15 +235,15 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { type CompiledBlocks = List[CompiledBlock] // Dispatch to the corresponding compile function - def compile(stmt: Statement, ss: CompilationState): CompiledBlocks = stmt match { - case s: ExtractionRule => compile(s, ss) - case s: FunctionCallRule => compile(s, ss) - case s: InferenceRule => compile(s, ss) + def compile(stmt: Statement, ss: CompilationState, isIncremental: Boolean): CompiledBlocks = stmt match { + case s: ExtractionRule => compile(s, ss, isIncremental) + case s: FunctionCallRule => compile(s, ss, isIncremental) + case s: InferenceRule => compile(s, ss, isIncremental) case _ => List() // defaults to compiling into empty block } // Generate extraction rule part for deepdive - def compile(stmt: ExtractionRule, ss: CompilationState): CompiledBlocks = { + def compile(stmt: ExtractionRule, ss: CompilationState, isIncremental: Boolean): CompiledBlocks = { // Generate the body of the query. 
val qs = new QuerySchema( stmt.q )
     // variable columns
     val variableCols = stmt.q.head.terms flatMap {
       case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, stmt.q, true)
     }
@@ -255,8 +255,11 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
 
     val selectStr = (List(variableColsStr) flatMap (u => u)).mkString(", ")
 
+    val ddCount = if (isIncremental) ( stmt.q.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else ""
+    val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\"""" else ""
+
     val inputQuery = s"""
-      SELECT ${selectStr}
+      SELECT ${selectStr}${ddCountStr}
       ${ ss.generateSQLBody(stmt.q) }"""
 
     val blockName = ss.resolveExtractorBlockName(stmt)
@@ -272,7 +275,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
     List(extractor)
   }
 
-  def compile(stmt: FunctionCallRule, ss: CompilationState): CompiledBlocks = {
+  def compile(stmt: FunctionCallRule, ss: CompilationState, isIncremental: Boolean): CompiledBlocks = {
     val inputQuery = s"""
     SELECT * FROM ${stmt.input}
     """
@@ -302,7 +305,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
   }
 
   // generate inference rule part for deepdive
-  def compile(stmt: InferenceRule, ss: CompilationState): CompiledBlocks = {
+  def compile(stmt: InferenceRule, ss: CompilationState, isIncremental: Boolean): CompiledBlocks = {
     var blocks = List[String]()
 
     val qs = new QuerySchema( stmt.q )
@@ -316,7 +319,10 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
       val name = ss.resolveName(qs.getVar(z.supervision))
       val labelCol = s"R${index}.${name}"
       val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ")
-      val query = s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label
+      val ddCount = if (isIncremental) ( stmt.q.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else ""
+      val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else ""
+
+      val query = s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr}
       ${ ss.generateSQLBody(z.q) }"""
 
       val blockName = ss.resolveExtractorBlockName(z)
@@ -352,9 +358,12 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
 
     val selectStr = (List(variableIdsStr, variableColsStr, uwStr) flatMap (u => u)).mkString(", ")
 
+    val ddCount = if (isIncremental) ( fakeCQ.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else ""
+    val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\"""" else ""
+
    // factor input query
     val inputQuery = s"""
-      SELECT ${selectStr}
+      SELECT ${selectStr} ${ddCountStr}
      ${ ss.generateSQLBody(fakeCQ) }"""
 
     // factor function
@@ -431,7 +440,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
     :::
     compileVariableSchema(programToCompile, state)
     :::
-    (programToCompile flatMap {compile(_, state)})
+    (programToCompile flatMap {compile(_, state, config.isIncremental)})
     )
 
     // emit the generated code
From 2dbb3d1cb3b551daf1ed140a2f28f47b7d707500 Mon Sep 17 00:00:00 2001
From: senwu
Date: Wed, 6 May 2015 23:16:53 -0700
Subject: [PATCH 064/347] resolve dependency issue: first, based on statement
 type, union statements with the same head into blocks; then resolve
 dependencies among different blocks
---
 DeepDiveLogCompiler.scala | 334 +++++++++++++++++++++++---------------
 1 file changed, 206 insertions(+), 128 deletions(-)

diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala
index 3ebac870f..aed9bb916 100644
--- a/DeepDiveLogCompiler.scala
+++ b/DeepDiveLogCompiler.scala
@@ -58,6 +58,7 @@ Consider
 
 import scala.collection.immutable.HashMap
 import org.apache.commons.lang3.StringEscapeUtils
+import scala.collection.mutable.ListBuffer
 
 // This handles the schema 
statements. // It can tell you if a predicate is a "query" predicate or a "ground prediate" @@ -76,6 +77,8 @@ class CompilationState( statements : DeepDiveLog.Program ) { // The dependency graph between statements. var dependencies : Map[Statement, Set[Statement]] = new HashMap() + var visable : Set[Statement] = Set() + def init() = { // generate the statements. statements.foreach { @@ -91,6 +94,7 @@ class CompilationState( statements : DeepDiveLog.Program ) { case FunctionCallRule(_,_,_) => () } + analyzeVisable(statements) analyzeDependency(statements) } @@ -101,10 +105,14 @@ class CompilationState( statements : DeepDiveLog.Program ) { } // Given a statement, resolve its name for the compiled extractor block. - def resolveExtractorBlockName(s: Statement): String = s match { - case s: FunctionCallRule => s"extraction_rule_${statements indexOf s}" - case s: ExtractionRule => s"extraction_rule_${statements indexOf s}" - case s: InferenceRule => s"extraction_rule_${s.q.head.name}" + def resolveExtractorBlockName(s: Statement): String = { + if (visable contains s) { + s match { + case s: FunctionCallRule => s"extraction_rule_${statements indexOf s}" + case s: ExtractionRule => s"extraction_rule_${statements indexOf s}" + case s: InferenceRule => s"extraction_rule_${s.q.head.name}" + } + } else "" } // Given a variable, resolve it. TODO: This should give a warning, @@ -170,29 +178,68 @@ class CompilationState( statements : DeepDiveLog.Program ) { ${ whereClauseStr }""" } + // Analyze the block visibility among statements + def analyzeVisable(statements: List[Statement]) = { + val extractionRules = new ListBuffer[ExtractionRule]() + val functionCallRules = new ListBuffer[FunctionCallRule]() + val inferenceRules = new ListBuffer[InferenceRule]() + + statements foreach (_ match { + case s: ExtractionRule => extractionRules += s + case s: FunctionCallRule => functionCallRules += s + case s: InferenceRule => inferenceRules += s + case _ => + }) + + val extractionRulesGroup = extractionRules.groupBy(_.q.head.name) + val functionCallRulesGroup = functionCallRules.groupBy(_.input) + val inferenceRulesGroup = inferenceRules.groupBy(_.q.head.name) + + extractionRulesGroup foreach {keyVal => visable += keyVal._2(0)} + functionCallRulesGroup foreach {keyVal => visable += keyVal._2(0)} + inferenceRulesGroup foreach {keyVal => visable += keyVal._2(0)} + } // Analyze the dependency between statements and construct a graph. 
def analyzeDependency(statements: List[Statement]) = { // first map head names to the actual statement - var stmtByHeadName = new HashMap[String, Statement]() + val extractionRuleByHeadName = new ListBuffer[ExtractionRule]() + val inferenceRuleByHeadName = new ListBuffer[InferenceRule]() + val functionCallRuleByHeadName = new ListBuffer[FunctionCallRule]() + statements foreach { - case e : ExtractionRule => stmtByHeadName += { e.q.head.name -> e } - case f : FunctionCallRule => stmtByHeadName += { f.output -> f } - case w : InferenceRule => stmtByHeadName += { w.q.head.name -> w } - case _ => + case s: ExtractionRule => extractionRuleByHeadName += s + case s: FunctionCallRule => functionCallRuleByHeadName += s + case s: InferenceRule => inferenceRuleByHeadName += s + case _ => } + + val eByHeadNameGroup = extractionRuleByHeadName.toList.groupBy(_.q.head.name) + val iByHeadNameGroup = inferenceRuleByHeadName.toList.groupBy(_.q.head.name) + val fByHeadNameGroup = functionCallRuleByHeadName.toList.groupBy(_.output) + + var stmtByHeadName = (eByHeadNameGroup.toSeq ++ iByHeadNameGroup.toSeq ++ fByHeadNameGroup.toSeq).groupBy(_._1).mapValues(_.map(_._2).toList) + // var stmtByHeadName = new HashMap[String, Statement]() + // statements foreach { + // case e : ExtractionRule => stmtByHeadName += { e.q.head.name -> e } + // case f : FunctionCallRule => stmtByHeadName += { f.output -> f } + // case w : InferenceRule => stmtByHeadName += { w.q.head.name -> w } + // case _ => + // } // then, look at the body of each statement to construct a dependency graph statements foreach { - case f : FunctionCallRule => dependencies += { f -> ( Some(f.input) flatMap (stmtByHeadName get _)).toSet } - case e : ExtractionRule => dependencies += { e -> (e.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet } - case w : InferenceRule => dependencies += { w -> (w.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet } + case f : FunctionCallRule => dependencies += { f -> (( Some(f.input) flatMap (stmtByHeadName get _)).toSet.flatten.flatten) } + case e : ExtractionRule => dependencies += { e -> ((e.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet.flatten.flatten) } + case w : InferenceRule => dependencies += { w -> ((w.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet.flatten.flatten) } case _ => } } // Generates a "dependencies" value for a compiled block of given statement. 
- def generateDependenciesOfCompiledBlockFor(statement: Statement): String = { - val dependentExtractorBlockNames = - dependencies getOrElse (statement, Set()) map resolveExtractorBlockName + def generateDependenciesOfCompiledBlockFor(statements: List[Statement]): String = { + var dependentExtractorBlockNames = Set[String]() + for (statement <- statements) { + dependentExtractorBlockNames ++= ((dependencies getOrElse (statement, Set())) & visable) map resolveExtractorBlockName + } if (dependentExtractorBlockNames.size == 0) "" else { val depStr = dependentExtractorBlockNames map {" \"" + _ + "\" "} mkString(", ") s"dependencies: [${depStr}]" @@ -235,156 +282,168 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { type CompiledBlocks = List[CompiledBlock] // Dispatch to the corresponding compile function - def compile(stmt: Statement, ss: CompilationState, isIncremental: Boolean): CompiledBlocks = stmt match { - case s: ExtractionRule => compile(s, ss, isIncremental) - case s: FunctionCallRule => compile(s, ss, isIncremental) - case s: InferenceRule => compile(s, ss, isIncremental) - case _ => List() // defaults to compiling into empty block - } + // def compile(stmts: List[Statement], ss: CompilationState, isIncremental: Boolean): CompiledBlocks = stmts(0) match { + // case s: ExtractionRule => compileE(stmts, ss, isIncremental) + // case s: FunctionCallRule => compileF(stmts, ss, isIncremental) + // case s: InferenceRule => compileI(stmts, ss, isIncremental) + // case _ => List() // defaults to compiling into empty block + // } // Generate extraction rule part for deepdive - def compile(stmt: ExtractionRule, ss: CompilationState, isIncremental: Boolean): CompiledBlocks = { - // Generate the body of the query. - val qs = new QuerySchema( stmt.q ) - // variable columns - val variableCols = stmt.q.head.terms flatMap { - case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, stmt.q, true) - } + def compileE(stmts: List[ExtractionRule], ss: CompilationState, isIncremental: Boolean): CompiledBlocks = { + var inputQueries = new ListBuffer[String]() + for (stmt <- stmts) { + // Generate the body of the query. 
+ val qs = new QuerySchema( stmt.q ) + // variable columns + val variableCols = stmt.q.head.terms flatMap { + case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, stmt.q, true) + } - val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None + val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None - val selectStr = (List(variableColsStr) flatMap (u => u)).mkString(", ") + val selectStr = (List(variableColsStr) flatMap (u => u)).mkString(", ") - val ddCount = if (isIncremental) ( stmt.q.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\"""" else "" + val ddCount = if (isIncremental) ( stmt.q.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" - val inputQuery = s""" - SELECT ${selectStr}${ddCountStr} - ${ ss.generateSQLBody(stmt.q) }""" + inputQueries += s""" + SELECT ${selectStr}${ddCountStr} + ${ ss.generateSQLBody(stmt.q) }""" - val blockName = ss.resolveExtractorBlockName(stmt) + } + val blockName = ss.resolveExtractorBlockName(stmts(0)) val extractor = s""" deepdive.extraction.extractors.${blockName} { - sql: \"\"\" DROP VIEW IF EXISTS ${stmt.q.head.name}; - CREATE VIEW ${stmt.q.head.name} AS ${inputQuery} + sql: \"\"\" DROP VIEW IF EXISTS ${stmts(0).q.head.name}; + CREATE VIEW ${stmts(0).q.head.name} AS ${inputQueries.mkString(" UNION ")} \"\"\" style: "sql_extractor" - ${ss.generateDependenciesOfCompiledBlockFor(stmt)} + ${ss.generateDependenciesOfCompiledBlockFor(stmts)} } """ List(extractor) } - def compile(stmt: FunctionCallRule, ss: CompilationState, isIncremental: Boolean): CompiledBlocks = { - val inputQuery = s""" - SELECT * FROM ${stmt.input} - """ - - val function = ss.resolveFunctionName(stmt.function) - val udfDetails = (function.implementations collectFirst { - case impl: RowWiseLineHandler => - s"""udf: \"${StringEscapeUtils.escapeJava(impl.command)}\" - style: \"${impl.format}_extractor\"""" - }) - - if (udfDetails.isEmpty) - ss.error(s"Cannot find compilable implementation for function ${stmt.function} among:\n " - + (function.implementations mkString "\n ")) - - val blockName = ss.resolveExtractorBlockName(stmt) - val extractor = s""" - deepdive.extraction.extractors.${blockName} { - input: \"\"\" SELECT * FROM ${stmt.input} - \"\"\" - output_relation: \"${stmt.output}\" - ${udfDetails.get} - ${ss.generateDependenciesOfCompiledBlockFor(stmt)} - } - """ - List(extractor) + def compileF(stmts: List[FunctionCallRule], ss: CompilationState, isIncremental: Boolean): CompiledBlocks = { + var extractors = new ListBuffer[String]() + for (stmt <- stmts) { + val inputQuery = s""" + SELECT * FROM ${stmt.input} + """ + + val function = ss.resolveFunctionName(stmt.function) + val udfDetails = (function.implementations collectFirst { + case impl: RowWiseLineHandler => + s"""udf: \"${StringEscapeUtils.escapeJava(impl.command)}\" + style: \"${impl.format}_extractor\" """ + }) + + if (udfDetails.isEmpty) + ss.error(s"Cannot find compilable implementation for function ${stmt.function} among:\n " + + (function.implementations mkString "\n ")) + + val blockName = ss.resolveExtractorBlockName(stmt) + val extractor = s""" + deepdive.extraction.extractors.${blockName} { + input: \"\"\" SELECT * FROM ${stmt.input} + \"\"\" + output_relation: \"${stmt.output}\" + ${udfDetails.get} + 
${ss.generateDependenciesOfCompiledBlockFor(List(stmt))} + } + """ + extractors += extractor + } + extractors.toList } // generate inference rule part for deepdive - def compile(stmt: InferenceRule, ss: CompilationState, isIncremental: Boolean): CompiledBlocks = { + def compileI(stmts: List[InferenceRule], ss: CompilationState, isIncremental: Boolean): CompiledBlocks = { var blocks = List[String]() - val qs = new QuerySchema( stmt.q ) - + val qs = new QuerySchema( stmts(0).q ) // node query // generate the node portion (V) of the factor graph - def compileNodeRule(z: InferenceRule, qs: QuerySchema, ss: CompilationState) : CompiledBlocks = { - val headTerms = z.q.head.terms map { - case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" + def compileNodeRule(zs: List[InferenceRule], qs: QuerySchema, ss: CompilationState) : CompiledBlocks = { + var inputQueries = new ListBuffer[String]() + for (z <- zs) { + val headTerms = z.q.head.terms map { + case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" + } + val index = qs.getBodyIndex(z.supervision) + val name = ss.resolveName(qs.getVar(z.supervision)) + val labelCol = s"R${index}.${name}" + val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") + val ddCount = if (isIncremental) ( z.q.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else "" + + inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr} + ${ ss.generateSQLBody(z.q) } + """ } - val index = qs.getBodyIndex(z.supervision) - val name = ss.resolveName(qs.getVar(z.supervision)) - val labelCol = s"R${index}.${name}" - val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") - val ddCount = if (isIncremental) ( stmt.q.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else "" - - val query = s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr} - ${ ss.generateSQLBody(z.q) }""" - - val blockName = ss.resolveExtractorBlockName(z) + val blockName = ss.resolveExtractorBlockName(zs(0)) val ext = s""" deepdive.extraction.extractors.${blockName} { - sql: \"\"\" DROP TABLE IF EXISTS ${z.q.head.name}; - CREATE TABLE ${z.q.head.name} AS - ${query} + sql: \"\"\" DROP TABLE IF EXISTS ${zs(0).q.head.name}; + CREATE TABLE ${zs(0).q.head.name} AS + ${inputQueries.mkString(" UNION ")} \"\"\" style: "sql_extractor" - ${ss.generateDependenciesOfCompiledBlockFor(z)} + ${ss.generateDependenciesOfCompiledBlockFor(zs)} } """ List(ext) } - if (ss.isQueryTerm(stmt.q.head.name)) - blocks :::= compileNodeRule(stmt, qs, ss) - - // edge query - val fakeBody = stmt.q.head +: stmt.q.body - val fakeCQ = ConjunctiveQuery(stmt.q.head, fakeBody) // we will just use the fakeBody below. 
- - val index = stmt.q.body.length + 1 - val qs2 = new QuerySchema( fakeCQ ) - val variableIdsStr = Some(s"""R0.id AS "${stmt.q.head.name}.R0.id" """) - val variableColsStr = Some(s"""R0.label AS "${stmt.q.head.name}.R0.label" """) - - // weight string - val uwStr = stmt.weights match { - case KnownFactorWeight(x) => None - case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, true)).mkString(", ")) - } + if (ss.isQueryTerm(stmts(0).q.head.name)) + blocks :::= compileNodeRule(stmts, qs, ss) - val selectStr = (List(variableIdsStr, variableColsStr, uwStr) flatMap (u => u)).mkString(", ") + val inferenceRuleToCompileGroup = stmts.groupBy(_.q.head.name) - val ddCount = if (isIncremental) ( fakeCQ.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\"""" else "" + for (stmt <- stmts) { + // edge query + val fakeBody = stmt.q.head +: stmt.q.body + val fakeCQ = ConjunctiveQuery(stmt.q.head, fakeBody) // we will just use the fakeBody below. - // factor input query - val inputQuery = s""" - SELECT ${selectStr} ${ddCountStr} - ${ ss.generateSQLBody(fakeCQ) }""" + val index = stmt.q.body.length + 1 + val qs2 = new QuerySchema( fakeCQ ) + val variableIdsStr = Some(s"""R0.id AS "${stmt.q.head.name}.R0.id" """) + val variableColsStr = Some(s"""R0.label AS "${stmt.q.head.name}.R0.label" """) - // factor function - val func = s"""Imply(${stmt.q.head.name}.R0.label)""" - - // weight - val weight = stmt.weights match { - case KnownFactorWeight(x) => s"${x}" - case UnknownFactorWeight(w) => { - s"""?(${w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, false)).mkString(", ")})""" + // weight string + val uwStr = stmt.weights match { + case KnownFactorWeight(x) => None + case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, true)).mkString(", ")) } - } - blocks ::= s""" - deepdive.inference.factors.factor_${stmt.q.head.name} { - input_query: \"\"\"${inputQuery}\"\"\" - function: "${func}" - weight: "${weight}" + val selectStr = (List(variableIdsStr, variableColsStr, uwStr) flatMap (u => u)).mkString(", ") + + val ddCount = if (isIncremental) ( fakeCQ.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" + + // factor input query + val inputQuery = s""" + SELECT ${selectStr} ${ddCountStr} + ${ ss.generateSQLBody(fakeCQ) }""" + + // factor function + val func = s"""Imply(${stmt.q.head.name}.R0.label)""" + + // weight + val weight = stmt.weights match { + case KnownFactorWeight(x) => s"${x}" + case UnknownFactorWeight(w) => { + s"""?(${w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, false)).mkString(", ")})""" + } } - """ + blocks ::= s""" + deepdive.inference.factors.factor_${stmt.q.head.name} { + input_query: \"\"\"${inputQuery}\"\"\" + function: "${func}" + weight: "${weight}" + } + """ + } blocks.reverse } @@ -422,7 +481,6 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { List(ddSchema) } - // entry point for compilation override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { // determine the program to compile @@ -434,13 +492,33 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { // take an initial pass to analyze the parsed program val state = new CompilationState( programToCompile ) + + val extractionRuleToCompile = new ListBuffer[ExtractionRule]() + val inferenceRuleToCompile = new 
ListBuffer[InferenceRule]() + val functionCallRuleToCompile = new ListBuffer[FunctionCallRule]() + programToCompile foreach (_ match { + case s: ExtractionRule => extractionRuleToCompile += s + case s: FunctionCallRule => functionCallRuleToCompile += s + case s: InferenceRule => inferenceRuleToCompile += s + case _ => + }) + + val extractionRuleToCompileGroup = extractionRuleToCompile.groupBy(_.q.head.name) + val inferenceRuleToCompileGroup = inferenceRuleToCompile.groupBy(_.q.head.name) + val functionCallRuleToCompileGroup = functionCallRuleToCompile.groupBy(_.input) + + val body = new ListBuffer[String]() + extractionRuleToCompileGroup foreach {keyVal => body ++= compileE(keyVal._2.toList, state, config.isIncremental)} + functionCallRuleToCompileGroup foreach {keyVal => body ++= compileF(keyVal._2.toList, state, config.isIncremental)} + inferenceRuleToCompileGroup foreach {keyVal => body ++= compileI(keyVal._2.toList, state, config.isIncremental)} + // compile the program into blocks of application.conf val blocks = ( compileUserSettings ::: compileVariableSchema(programToCompile, state) ::: - (programToCompile flatMap {compile(_, state, config.isIncremental)}) + body.toList ) // emit the generated code From 1bd05f426a2cc0dda384ea90292d2866f819f09c Mon Sep 17 00:00:00 2001 From: senwu Date: Wed, 6 May 2015 23:22:18 -0700 Subject: [PATCH 065/347] add spouse incremental example --- ...pouse_incremental_example.compile.expected | 179 +++++++++++++ examples/spouse_incremental_example.ddl | 101 ++++++++ .../spouse_incremental_example.print.expected | 238 ++++++++++++++++++ 3 files changed, 518 insertions(+) create mode 100644 examples/spouse_incremental_example.compile.expected create mode 100644 examples/spouse_incremental_example.ddl create mode 100644 examples/spouse_incremental_example.print.expected diff --git a/examples/spouse_incremental_example.compile.expected b/examples/spouse_incremental_example.compile.expected new file mode 100644 index 000000000..d02f580f9 --- /dev/null +++ b/examples/spouse_incremental_example.compile.expected @@ -0,0 +1,179 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + } + + + deepdive.schema.variables { + dd_delta_has_spouse.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_15 { + sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; + CREATE VIEW dd_delta_ext_people_input AS + SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" + FROM dd_delta_sentences R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_27 { + sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; + CREATE VIEW dd_delta_ext_has_spouse_features_input AS + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND 
R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS 
"dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS 
"has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_17" , "extraction_rule_12" ] + } + + + deepdive.extraction.extractors.extraction_rule_20 { + sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; + CREATE VIEW dd_delta_ext_has_spouse_input AS + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM dd_delta_people_mentions R0, people_mentions R1 + WHERE R1.sentence_id = R0.sentence_id UNION + SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , 
R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM people_mentions R0, dd_delta_people_mentions R1 + WHERE R1.sentence_id = R0.sentence_id UNION + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM dd_delta_people_mentions R0, dd_delta_people_mentions R1 + WHERE R1.sentence_id = R0.sentence_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_12" ] + } + + + deepdive.extraction.extractors.extraction_rule_12 { + input: """ SELECT * FROM dd_delta_ext_people_input + """ + output_relation: "dd_delta_people_mentions" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_15" ] + } + + + deepdive.extraction.extractors.extraction_rule_24 { + input: """ SELECT * FROM dd_delta_ext_has_spouse_features_input + """ + output_relation: "dd_delta_has_spouse_features" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_27" ] + } + + + deepdive.extraction.extractors.extraction_rule_17 { + input: """ SELECT * FROM dd_delta_ext_has_spouse_input + """ + output_relation: "dd_delta_has_spouse_candidates" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_20" ] + } + + + deepdive.extraction.extractors.extraction_rule_dd_delta_has_spouse { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse; + CREATE TABLE dd_delta_has_spouse AS + SELECT DISTINCT 0 as id, R0.relation_id, R0.l AS label , R0.dd_count * R1.dd_count AS dd_count + FROM dd_delta_has_spouse_candidates R0, has_spouse_features R1 + WHERE R1.relation_id = R0.relation_id + UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.l AS label , R0.dd_count * R1.dd_count AS dd_count + FROM has_spouse_candidates R0, dd_delta_has_spouse_features R1 + WHERE R1.relation_id = R0.relation_id + UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.l AS label , R0.dd_count * R1.dd_count AS dd_count + FROM dd_delta_has_spouse_candidates R0, dd_delta_has_spouse_features R1 + WHERE R1.relation_id = R0.relation_id + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_17" , "extraction_rule_24" ] + } + + + deepdive.inference.factors.factor_dd_delta_has_spouse { + input_query: """ + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R0.label AS "dd_delta_has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + function: "Imply(dd_delta_has_spouse.R0.label)" + weight: "?(has_spouse_features.R2.feature)" + } + + + deepdive.inference.factors.factor_dd_delta_has_spouse { + input_query: """ + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R0.label AS "dd_delta_has_spouse.R0.label" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * 
R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, has_spouse_candidates R1, dd_delta_has_spouse_features R2 + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + function: "Imply(dd_delta_has_spouse.R0.label)" + weight: "?(dd_delta_has_spouse_features.R2.feature)" + } + + + deepdive.inference.factors.factor_dd_delta_has_spouse { + input_query: """ + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R0.label AS "dd_delta_has_spouse.R0.label" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, dd_delta_has_spouse_features R2 + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + function: "Imply(dd_delta_has_spouse.R0.label)" + weight: "?(dd_delta_has_spouse_features.R2.feature)" + } + diff --git a/examples/spouse_incremental_example.ddl b/examples/spouse_incremental_example.ddl new file mode 100644 index 000000000..6ff0ae117 --- /dev/null +++ b/examples/spouse_incremental_example.ddl @@ -0,0 +1,101 @@ +articles( + article_id text, + text text, + dd_count int). +sentences( + document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text, + dd_count int). +people_mentions( + sentence_id text, + start_position int, + length int, + text text, + mention_id text, + dd_count int). + +has_spouse_candidates( + person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + dd_count int). +has_spouse_features( + relation_id text, + feature text, + dd_count int). + +has_spouse?( + relation_id text, + dd_count int). + +people_mentions :- + !ext_people(ext_people_input). + +ext_people_input( + sentence_id text, + words text[], + ner_tags text[], + dd_count int). + +ext_people_input(s, words, ner_tags) :- + sentences(a, b, words, c, d, e, ner_tags, f, s). + +function ext_people over like ext_people_input + returns like people_mentions + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. + +has_spouse_candidates :- + !ext_has_spouse(ext_has_spouse_input). + +ext_has_spouse_input( + sentence_id text, + p1_id text, + p1_text text, + p2_id text, + p2_text text, + dd_count int). + +ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + people_mentions(s, a, b, p1_text, p1_id), + people_mentions(s, c, d, p2_text, p2_id). + +function ext_has_spouse over like ext_has_spouse_input + returns like has_spouse_candidates + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. + +has_spouse_features :- + !ext_has_spouse_features(ext_has_spouse_features_input). + +ext_has_spouse_features_input( + words text[], + relation_id text, + p1_start_position int, + p1_length int, + p2_start_position int, + p2_length int, + dd_count int). + +ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +function ext_has_spouse_features over like ext_has_spouse_features_input + returns like has_spouse_features + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. 
+ +has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), + has_spouse_features(rid, f) +weight = f +label = l. diff --git a/examples/spouse_incremental_example.print.expected b/examples/spouse_incremental_example.print.expected new file mode 100644 index 000000000..a9e5ad633 --- /dev/null +++ b/examples/spouse_incremental_example.print.expected @@ -0,0 +1,238 @@ +articles(article_id text, + text text). + +dd_delta_articles(article_id text, + text text). + +sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text). + +dd_delta_sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text). + +people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text). + +dd_delta_people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text). + +has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text). + +dd_delta_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text). + +has_spouse_features(relation_id text, + feature text). + +dd_delta_has_spouse_features(relation_id text, + feature text). + +has_spouse?(relation_id text). + +dd_delta_has_spouse?(relation_id text). + +dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). + +ext_people_input(sentence_id text, + words text[], + ner_tags text[]). + +dd_delta_ext_people_input(sentence_id text, + words text[], + ner_tags text[]). + +dd_delta_ext_people_input(s, words, ner_tags) :- + dd_delta_sentences(a, b, words, c, d, e, ner_tags, f, s). + +function ext_people + over like dd_delta_ext_people_input + returns like dd_delta_people_mentions + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + handles tsv lines. + +dd_delta_has_spouse_candidates :- !ext_has_spouse(dd_delta_ext_has_spouse_input). + +ext_has_spouse_input(sentence_id text, + p1_id text, + p1_text text, + p2_id text, + p2_text text). + +dd_delta_ext_has_spouse_input(sentence_id text, + p1_id text, + p1_text text, + p2_id text, + p2_text text). + +dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + dd_delta_people_mentions(s, a, b, p1_text, p1_id), + people_mentions(s, c, d, p2_text, p2_id). + +dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + people_mentions(s, a, b, p1_text, p1_id), + dd_delta_people_mentions(s, c, d, p2_text, p2_id). + +dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + dd_delta_people_mentions(s, a, b, p1_text, p1_id), + dd_delta_people_mentions(s, c, d, p2_text, p2_id). + +function ext_has_spouse + over like dd_delta_ext_has_spouse_input + returns like dd_delta_has_spouse_candidates + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + handles tsv lines. + +dd_delta_has_spouse_features :- !ext_has_spouse_features(dd_delta_ext_has_spouse_features_input). + +ext_has_spouse_features_input(words text[], + relation_id text, + p1_start_position int, + p1_length int, + p2_start_position int, + p2_length int). + +dd_delta_ext_has_spouse_features_input(words text[], + relation_id text, + p1_start_position int, + p1_length int, + p2_start_position int, + p2_length int). 
+ +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + dd_delta_sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + dd_delta_sentences(a, b, words, c, d, e, f, g, s), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + dd_delta_sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + dd_delta_sentences(a, b, words, c, d, e, f, g, s), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + dd_delta_sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + dd_delta_sentences(a, b, words, c, d, e, f, g, s), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). 
+ +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + dd_delta_sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + dd_delta_sentences(a, b, words, c, d, e, f, g, s), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). + +function ext_has_spouse_features + over like dd_delta_ext_has_spouse_features_input + returns like dd_delta_has_spouse_features + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + handles tsv lines. + +dd_delta_has_spouse(rid) :- + dd_delta_has_spouse_candidates(a, b, c, d, rid, l), + has_spouse_features(rid, f) + weight = f + label = l. +dd_delta_has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), + dd_delta_has_spouse_features(rid, f) + weight = f + label = l. +dd_delta_has_spouse(rid) :- + dd_delta_has_spouse_candidates(a, b, c, d, rid, l), + dd_delta_has_spouse_features(rid, f) + weight = f + label = l. 
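Note on the expected output above: the fifteen dd_delta_ext_has_spouse_features_input rules are the incremental expansion of the single four-atom rule in the .ddl file. For a body of n atoms, the rewriting emits every combination in which at least one atom is replaced by its dd_delta_ counterpart (2^n - 1 variants, hence 15 for n = 4); the all-original combination is omitted, since it can derive no new tuples. A minimal sketch of that expansion in Scala, using simplified stand-in types rather than the compiler's actual AST:

    // Simplified stand-ins for the compiler's AST; for illustration only.
    case class SAtom(name: String, args: List[String])
    case class SRule(head: SAtom, body: List[SAtom])

    def delta(a: SAtom): SAtom = a.copy(name = "dd_delta_" + a.name)

    // Emit the 2^n - 1 body variants with at least one delta atom; bit i of
    // the mask decides whether body atom i is replaced by its delta relation.
    def expandDelta(r: SRule): Seq[SRule] =
      (1 until (1 << r.body.length)) map { mask =>
        val newBody = r.body.zipWithIndex map { case (a, i) =>
          if ((mask & (1 << i)) != 0) delta(a) else a
        }
        SRule(delta(r.head), newBody)
      }

Counting the mask upward in binary order reproduces exactly the order of the fifteen variants printed above: delta sentences first, then delta has_spouse_candidates, then both, and so on.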
From 9c3aec08344fb4dde6f6dbc15bb73a1aadbff3d3 Mon Sep 17 00:00:00 2001 From: senwu Date: Mon, 11 May 2015 22:20:27 -0700 Subject: [PATCH 066/347] fix error in spouse example --- examples/spouse_example.compile.expected | 113 +++++++++--------- examples/spouse_example.ddl | 20 ++-- examples/spouse_example.print.expected | 23 ++-- ...pouse_incremental_example.compile.expected | 106 ++++++++-------- examples/spouse_incremental_example.ddl | 20 ++-- .../spouse_incremental_example.print.expected | 74 ++++++------ 6 files changed, 183 insertions(+), 173 deletions(-) diff --git a/examples/spouse_example.compile.expected b/examples/spouse_example.compile.expected index cb5252ee9..d463a2f83 100644 --- a/examples/spouse_example.compile.expected +++ b/examples/spouse_example.compile.expected @@ -11,94 +11,95 @@ deepdive.schema.variables { - has_spouse.label: Boolean - } - - - deepdive.extraction.extractors.extraction_rule_6 { - input: """ SELECT * FROM ext_people_input - """ - output_relation: "people_mentions" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" - style: "tsv_extractor" - dependencies: [ "extraction_rule_8" ] + f_has_spouse.label: Boolean } deepdive.extraction.extractors.extraction_rule_8 { sql: """ DROP VIEW IF EXISTS ext_people_input; CREATE VIEW ext_people_input AS - SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" - FROM sentences R0 + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" + FROM sentences R0 """ style: "sql_extractor" - + } - deepdive.extraction.extractors.extraction_rule_10 { - input: """ SELECT * FROM ext_has_spouse_input + deepdive.extraction.extractors.extraction_rule_16 { + sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; + CREATE VIEW ext_has_spouse_features_input AS + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + FROM sentences R0, has_spouse R1, people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ - output_relation: "has_spouse_candidates" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" - style: "tsv_extractor" - dependencies: [ "extraction_rule_12" ] + style: "sql_extractor" + dependencies: [ "extraction_rule_10" , "extraction_rule_6" ] } deepdive.extraction.extractors.extraction_rule_12 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_input; CREATE VIEW ext_has_spouse_input AS - SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" - FROM people_mentions R0, people_mentions R1 + SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" + FROM people_mentions R0, people_mentions R1 WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" - 
dependencies: [ "extraction_rule_6" ] + dependencies: [ "extraction_rule_6" ] } - deepdive.extraction.extractors.extraction_rule_14 { - input: """ SELECT * FROM ext_has_spouse_features_input - """ - output_relation: "has_spouse_features" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" - style: "tsv_extractor" - dependencies: [ "extraction_rule_16" ] - } - + deepdive.extraction.extractors.extraction_rule_6 { + input: """ SELECT * FROM ext_people_input + """ + output_relation: "people_mentions" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_8" ] + } + - deepdive.extraction.extractors.extraction_rule_16 { - sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; - CREATE VIEW ext_has_spouse_features_input AS - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" - FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id - """ - style: "sql_extractor" - dependencies: [ "extraction_rule_10" , "extraction_rule_6" ] - } - + deepdive.extraction.extractors.extraction_rule_14 { + input: """ SELECT * FROM ext_has_spouse_features_input + """ + output_relation: "has_spouse_features" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_16" ] + } + + + deepdive.extraction.extractors.extraction_rule_10 { + input: """ SELECT * FROM ext_has_spouse_input + """ + output_relation: "has_spouse" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_12" ] + } + - deepdive.extraction.extractors.extraction_rule_has_spouse { - sql: """ DROP TABLE IF EXISTS has_spouse; - CREATE TABLE has_spouse AS - SELECT DISTINCT 0 as id, R0.relation_id, R0.l AS label - FROM has_spouse_candidates R0, has_spouse_features R1 + deepdive.extraction.extractors.extraction_rule_f_has_spouse { + sql: """ DROP TABLE IF EXISTS f_has_spouse; + CREATE TABLE f_has_spouse AS + SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label + FROM has_spouse R0, has_spouse_features R1 WHERE R1.relation_id = R0.relation_id + """ style: "sql_extractor" dependencies: [ "extraction_rule_10" , "extraction_rule_14" ] } - deepdive.inference.factors.factor_has_spouse { - input_query: """ - SELECT R0.id AS "has_spouse.R0.id" , R0.label AS "has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" - FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 + deepdive.inference.factors.factor_f_has_spouse { + input_query: """ + SELECT R0.id AS "f_has_spouse.R0.id" , R0.label AS "f_has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" + FROM f_has_spouse R0, has_spouse R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Imply(has_spouse.R0.label)" - weight: "?(has_spouse_features.R2.feature)" - } - + function: "Imply(f_has_spouse.R0.label)" + weight: 
"?(has_spouse_features.R2.feature)" + } + diff --git a/examples/spouse_example.ddl b/examples/spouse_example.ddl index f7b87deed..8a647f61b 100644 --- a/examples/spouse_example.ddl +++ b/examples/spouse_example.ddl @@ -1,6 +1,7 @@ articles( article_id text, text text). + sentences( document_id text, sentence text, @@ -11,6 +12,7 @@ sentences( ner_tags text[], sentence_offset int, sentence_id text). + people_mentions( sentence_id text, start_position int, @@ -18,17 +20,19 @@ people_mentions( text text, mention_id text). -has_spouse_candidates( +has_spouse( person1_id text, person2_id text, sentence_id text, description text, - relation_id text). + relation_id text, + is_true boolean). + has_spouse_features( relation_id text, feature text). -has_spouse?(relation_id text). +f_has_spouse?(relation_id text). people_mentions :- !ext_people(ext_people_input). @@ -45,7 +49,7 @@ function ext_people over like ext_people_input returns like people_mentions implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. -has_spouse_candidates :- +has_spouse :- !ext_has_spouse(ext_has_spouse_input). ext_has_spouse_input( @@ -60,7 +64,7 @@ ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- people_mentions(s, c, d, p2_text, p2_id). function ext_has_spouse over like ext_has_spouse_input - returns like has_spouse_candidates + returns like has_spouse implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. has_spouse_features :- @@ -76,7 +80,7 @@ ext_has_spouse_features_input( ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + has_spouse(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). @@ -84,8 +88,8 @@ function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. -has_spouse(rid) :- - has_spouse_candidates(a, b, c, d, rid, l), +f_has_spouse(rid) :- + has_spouse(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f label = l. diff --git a/examples/spouse_example.print.expected b/examples/spouse_example.print.expected index 6787de76d..fe9cf190c 100644 --- a/examples/spouse_example.print.expected +++ b/examples/spouse_example.print.expected @@ -17,16 +17,17 @@ people_mentions(sentence_id text, text text, mention_id text). -has_spouse_candidates(person1_id text, - person2_id text, - sentence_id text, - description text, - relation_id text). +has_spouse(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). has_spouse_features(relation_id text, feature text). -has_spouse?(relation_id text). +f_has_spouse?(relation_id text). people_mentions :- !ext_people(ext_people_input). @@ -43,7 +44,7 @@ function ext_people implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. -has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input). +has_spouse :- !ext_has_spouse(ext_has_spouse_input). 
ext_has_spouse_input(sentence_id text, p1_id text, @@ -57,7 +58,7 @@ ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like ext_has_spouse_input - returns like has_spouse_candidates + returns like has_spouse implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. @@ -72,7 +73,7 @@ ext_has_spouse_features_input(words text[], ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + has_spouse(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). @@ -82,8 +83,8 @@ function ext_has_spouse_features implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. -has_spouse(rid) :- - has_spouse_candidates(a, b, c, d, rid, l), +f_has_spouse(rid) :- + has_spouse(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f label = l. diff --git a/examples/spouse_incremental_example.compile.expected b/examples/spouse_incremental_example.compile.expected index d02f580f9..2e39ffc25 100644 --- a/examples/spouse_incremental_example.compile.expected +++ b/examples/spouse_incremental_example.compile.expected @@ -11,7 +11,7 @@ deepdive.schema.variables { - dd_delta_has_spouse.label: Boolean + dd_delta_f_has_spouse.label: Boolean } @@ -30,50 +30,50 @@ deepdive.extraction.extractors.extraction_rule_27 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" 
, R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, dd_delta_has_spouse R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, dd_delta_has_spouse R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, has_spouse R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * 
R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, dd_delta_has_spouse R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, dd_delta_has_spouse R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS 
"dd_count" + FROM sentences R0, has_spouse R1, people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse R1, people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, dd_delta_has_spouse R1, people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + 
FROM dd_delta_sentences R0, dd_delta_has_spouse R1, people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, has_spouse R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS 
"dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, dd_delta_has_spouse R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, dd_delta_has_spouse R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" @@ -122,24 +122,24 @@ deepdive.extraction.extractors.extraction_rule_17 { input: """ SELECT * FROM dd_delta_ext_has_spouse_input """ - output_relation: "dd_delta_has_spouse_candidates" + output_relation: "dd_delta_has_spouse" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" style: "tsv_extractor" dependencies: [ "extraction_rule_20" ] } - deepdive.extraction.extractors.extraction_rule_dd_delta_has_spouse { - sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse; - CREATE TABLE dd_delta_has_spouse AS - SELECT DISTINCT 0 as id, R0.relation_id, R0.l AS label , R0.dd_count * R1.dd_count AS dd_count - FROM dd_delta_has_spouse_candidates R0, has_spouse_features R1 + deepdive.extraction.extractors.extraction_rule_dd_delta_f_has_spouse { + sql: """ DROP TABLE IF EXISTS dd_delta_f_has_spouse; + CREATE TABLE dd_delta_f_has_spouse AS + SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count + FROM dd_delta_has_spouse R0, has_spouse_features R1 WHERE R1.relation_id = R0.relation_id - UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.l AS label , R0.dd_count * R1.dd_count AS dd_count - FROM has_spouse_candidates R0, dd_delta_has_spouse_features R1 + UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count + FROM has_spouse R0, dd_delta_has_spouse_features R1 WHERE R1.relation_id = R0.relation_id - UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.l AS label , R0.dd_count * R1.dd_count AS dd_count - FROM dd_delta_has_spouse_candidates R0, dd_delta_has_spouse_features R1 + UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count + FROM dd_delta_has_spouse R0, dd_delta_has_spouse_features R1 WHERE R1.relation_id = R0.relation_id """ @@ -148,32 +148,32 @@ } - 
deepdive.inference.factors.factor_dd_delta_has_spouse { + deepdive.inference.factors.factor_dd_delta_f_has_spouse { input_query: """ - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R0.label AS "dd_delta_has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 + SELECT R0.id AS "dd_delta_f_has_spouse.R0.id" , R0.label AS "dd_delta_f_has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_f_has_spouse R0, dd_delta_has_spouse R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Imply(dd_delta_has_spouse.R0.label)" + function: "Imply(dd_delta_f_has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } - deepdive.inference.factors.factor_dd_delta_has_spouse { + deepdive.inference.factors.factor_dd_delta_f_has_spouse { input_query: """ - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R0.label AS "dd_delta_has_spouse.R0.label" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, has_spouse_candidates R1, dd_delta_has_spouse_features R2 + SELECT R0.id AS "dd_delta_f_has_spouse.R0.id" , R0.label AS "dd_delta_f_has_spouse.R0.label" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_f_has_spouse R0, has_spouse R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Imply(dd_delta_has_spouse.R0.label)" + function: "Imply(dd_delta_f_has_spouse.R0.label)" weight: "?(dd_delta_has_spouse_features.R2.feature)" } - deepdive.inference.factors.factor_dd_delta_has_spouse { + deepdive.inference.factors.factor_dd_delta_f_has_spouse { input_query: """ - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R0.label AS "dd_delta_has_spouse.R0.label" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, dd_delta_has_spouse_features R2 + SELECT R0.id AS "dd_delta_f_has_spouse.R0.id" , R0.label AS "dd_delta_f_has_spouse.R0.label" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_f_has_spouse R0, dd_delta_has_spouse R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Imply(dd_delta_has_spouse.R0.label)" + function: "Imply(dd_delta_f_has_spouse.R0.label)" weight: "?(dd_delta_has_spouse_features.R2.feature)" } diff --git a/examples/spouse_incremental_example.ddl b/examples/spouse_incremental_example.ddl index 6ff0ae117..04501621e 100644 --- a/examples/spouse_incremental_example.ddl +++ b/examples/spouse_incremental_example.ddl @@ -2,6 +2,7 @@ articles( article_id text, text text, dd_count int). + sentences( document_id text, sentence text, @@ -13,6 +14,7 @@ sentences( sentence_offset int, sentence_id text, dd_count int). + people_mentions( sentence_id text, start_position int, @@ -21,21 +23,21 @@ people_mentions( mention_id text, dd_count int). -has_spouse_candidates( +has_spouse( person1_id text, person2_id text, sentence_id text, description text, relation_id text, + is_true boolean, dd_count int). 
+ has_spouse_features( relation_id text, feature text, dd_count int). -has_spouse?( - relation_id text, - dd_count int). +f_has_spouse?(relation_id text). people_mentions :- !ext_people(ext_people_input). @@ -53,7 +55,7 @@ function ext_people over like ext_people_input returns like people_mentions implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. -has_spouse_candidates :- +has_spouse :- !ext_has_spouse(ext_has_spouse_input). ext_has_spouse_input( @@ -69,7 +71,7 @@ ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- people_mentions(s, c, d, p2_text, p2_id). function ext_has_spouse over like ext_has_spouse_input - returns like has_spouse_candidates + returns like has_spouse implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. has_spouse_features :- @@ -86,7 +88,7 @@ ext_has_spouse_features_input( ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + has_spouse(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). @@ -94,8 +96,8 @@ function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. -has_spouse(rid) :- - has_spouse_candidates(a, b, c, d, rid, l), +f_has_spouse(rid) :- + has_spouse(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f label = l. diff --git a/examples/spouse_incremental_example.print.expected b/examples/spouse_incremental_example.print.expected index a9e5ad633..fb6e1e3d8 100644 --- a/examples/spouse_incremental_example.print.expected +++ b/examples/spouse_incremental_example.print.expected @@ -36,17 +36,19 @@ dd_delta_people_mentions(sentence_id text, text text, mention_id text). -has_spouse_candidates(person1_id text, - person2_id text, - sentence_id text, - description text, - relation_id text). - -dd_delta_has_spouse_candidates(person1_id text, - person2_id text, - sentence_id text, - description text, - relation_id text). +has_spouse(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). + +dd_delta_has_spouse(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). has_spouse_features(relation_id text, feature text). @@ -54,9 +56,9 @@ has_spouse_features(relation_id text, dd_delta_has_spouse_features(relation_id text, feature text). -has_spouse?(relation_id text). +f_has_spouse?(relation_id text). -dd_delta_has_spouse?(relation_id text). +dd_delta_f_has_spouse?(relation_id text). dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). @@ -77,7 +79,7 @@ function ext_people implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. -dd_delta_has_spouse_candidates :- !ext_has_spouse(dd_delta_ext_has_spouse_input). +dd_delta_has_spouse :- !ext_has_spouse(dd_delta_ext_has_spouse_input). 
ext_has_spouse_input(sentence_id text, p1_id text, @@ -105,7 +107,7 @@ dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like dd_delta_ext_has_spouse_input - returns like dd_delta_has_spouse_candidates + returns like dd_delta_has_spouse implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. @@ -127,91 +129,91 @@ dd_delta_ext_has_spouse_features_input(words text[], dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + has_spouse(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + has_spouse(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + has_spouse(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + has_spouse(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). 
dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + has_spouse(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + has_spouse(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + has_spouse(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). @@ -221,18 +223,18 @@ function ext_has_spouse_features implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. -dd_delta_has_spouse(rid) :- - dd_delta_has_spouse_candidates(a, b, c, d, rid, l), +dd_delta_f_has_spouse(rid) :- + dd_delta_has_spouse(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f label = l. -dd_delta_has_spouse(rid) :- - has_spouse_candidates(a, b, c, d, rid, l), +dd_delta_f_has_spouse(rid) :- + has_spouse(a, b, c, d, rid, l), dd_delta_has_spouse_features(rid, f) weight = f label = l. -dd_delta_has_spouse(rid) :- - dd_delta_has_spouse_candidates(a, b, c, d, rid, l), +dd_delta_f_has_spouse(rid) :- + dd_delta_has_spouse(a, b, c, d, rid, l), dd_delta_has_spouse_features(rid, f) weight = f label = l. 
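A note on the dd_count bookkeeping that pervades the incremental outputs above: each generated SELECT carries a dd_count column formed as the product of the dd_count values of all joined relations, i.e., the number of distinct derivations of each result tuple. A rough sketch of how such an expression could be assembled (a hypothetical helper for illustration, not the compiler's actual code):

    // Hypothetical helper: one dd_count factor per aliased relation in the
    // FROM clause, multiplied together and exposed as the "dd_count" column.
    def ddCountExpr(aliases: Seq[String]): String =
      aliases.map(a => s"${a}.dd_count").mkString(" * ") + " AS \"dd_count\""

    // ddCountExpr(Seq("R0", "R1", "R2"))
    //   yields: R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count"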
From 6a1805f73d4d77b045e719a5a96c54a29573f72d Mon Sep 17 00:00:00 2001
From: senwu
Date: Mon, 11 May 2015 22:31:35 -0700
Subject: [PATCH 067/347] code clean & add more comments

---
 DeepDiveLogCompiler.scala | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala
index aed9bb916..fd41dad06 100644
--- a/DeepDiveLogCompiler.scala
+++ b/DeepDiveLogCompiler.scala
@@ -77,6 +77,7 @@ class CompilationState( statements : DeepDiveLog.Program ) {

   // The dependency graph between statements.
   var dependencies : Map[Statement, Set[Statement]] = new HashMap()

+  // Statements that will be compiled on their own rather than unioned into other statements
   var visable : Set[Statement] = Set()

   def init() = {
@@ -106,13 +107,11 @@ class CompilationState( statements : DeepDiveLog.Program ) {

   // Given a statement, resolve its name for the compiled extractor block.
   def resolveExtractorBlockName(s: Statement): String = {
-    if (visable contains s) {
-      s match {
-        case s: FunctionCallRule => s"extraction_rule_${statements indexOf s}"
-        case s: ExtractionRule => s"extraction_rule_${statements indexOf s}"
-        case s: InferenceRule => s"extraction_rule_${s.q.head.name}"
-      }
-    } else ""
+    s match {
+      case s: FunctionCallRule => s"extraction_rule_${statements indexOf s}"
+      case s: ExtractionRule => s"extraction_rule_${statements indexOf s}"
+      case s: InferenceRule => s"extraction_rule_${s.q.head.name}"
+    }
   }

   // Given a variable, resolve it. TODO: This should give a warning,
@@ -492,7 +491,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {

     // take an initial pass to analyze the parsed program
     val state = new CompilationState( programToCompile )
-
+    // group the statements to compile by type and by head name
     val extractionRuleToCompile = new ListBuffer[ExtractionRule]()
     val inferenceRuleToCompile = new ListBuffer[InferenceRule]()
     val functionCallRuleToCompile = new ListBuffer[FunctionCallRule]()

From f16166e4d730b08cbad3d59440954a035f8e9014 Mon Sep 17 00:00:00 2001
From: senwu
Date: Tue, 12 May 2015 03:19:41 -0700
Subject: [PATCH 068/347] fix inference rule name in spouse example

---
 examples/spouse_example.compile.expected | 24 ++--
 examples/spouse_example.ddl | 14 +-
 examples/spouse_example.print.expected | 24 ++--
 ...pouse_incremental_example.compile.expected | 100 +++++++-------
 examples/spouse_incremental_example.ddl | 14 +-
 .../spouse_incremental_example.print.expected | 122 ++++++++++--------
 6 files changed, 157 insertions(+), 141 deletions(-)

diff --git a/examples/spouse_example.compile.expected b/examples/spouse_example.compile.expected
index d463a2f83..10a58e063 100644
--- a/examples/spouse_example.compile.expected
+++ b/examples/spouse_example.compile.expected
@@ -11,7 +11,7 @@
 deepdive.schema.variables {
-    f_has_spouse.label: Boolean
+    has_spouse.label: Boolean
   }

@@ -30,8 +30,8 @@
   deepdive.extraction.extractors.extraction_rule_16 {
     sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input;
     CREATE VIEW ext_has_spouse_features_input AS
-    SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length"
-    FROM sentences R0, has_spouse R1, people_mentions R2, people_mentions R3
+    SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position"
, R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" @@ -74,18 +74,18 @@ deepdive.extraction.extractors.extraction_rule_10 { input: """ SELECT * FROM ext_has_spouse_input """ - output_relation: "has_spouse" + output_relation: "has_spouse_candidates" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" style: "tsv_extractor" dependencies: [ "extraction_rule_12" ] } - deepdive.extraction.extractors.extraction_rule_f_has_spouse { - sql: """ DROP TABLE IF EXISTS f_has_spouse; - CREATE TABLE f_has_spouse AS + deepdive.extraction.extractors.extraction_rule_has_spouse { + sql: """ DROP TABLE IF EXISTS has_spouse; + CREATE TABLE has_spouse AS SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label - FROM has_spouse R0, has_spouse_features R1 + FROM has_spouse_candidates R0, has_spouse_features R1 WHERE R1.relation_id = R0.relation_id """ @@ -94,12 +94,12 @@ } - deepdive.inference.factors.factor_f_has_spouse { + deepdive.inference.factors.factor_has_spouse { input_query: """ - SELECT R0.id AS "f_has_spouse.R0.id" , R0.label AS "f_has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" - FROM f_has_spouse R0, has_spouse R1, has_spouse_features R2 + SELECT R0.id AS "has_spouse.R0.id" , R0.label AS "has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" + FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Imply(f_has_spouse.R0.label)" + function: "Imply(has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } diff --git a/examples/spouse_example.ddl b/examples/spouse_example.ddl index 8a647f61b..ac5546486 100644 --- a/examples/spouse_example.ddl +++ b/examples/spouse_example.ddl @@ -20,7 +20,7 @@ people_mentions( text text, mention_id text). -has_spouse( +has_spouse_candidates( person1_id text, person2_id text, sentence_id text, @@ -32,7 +32,7 @@ has_spouse_features( relation_id text, feature text). -f_has_spouse?(relation_id text). +has_spouse?(relation_id text). people_mentions :- !ext_people(ext_people_input). @@ -49,7 +49,7 @@ function ext_people over like ext_people_input returns like people_mentions implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. -has_spouse :- +has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input). ext_has_spouse_input( @@ -64,7 +64,7 @@ ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- people_mentions(s, c, d, p2_text, p2_id). function ext_has_spouse over like ext_has_spouse_input - returns like has_spouse + returns like has_spouse_candidates implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. 
has_spouse_features :- @@ -80,7 +80,7 @@ ext_has_spouse_features_input( ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse(person1_id, person2_id, s, h, rid, x), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). @@ -88,8 +88,8 @@ function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. -f_has_spouse(rid) :- - has_spouse(a, b, c, d, rid, l), +has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f label = l. diff --git a/examples/spouse_example.print.expected b/examples/spouse_example.print.expected index fe9cf190c..4aedd134f 100644 --- a/examples/spouse_example.print.expected +++ b/examples/spouse_example.print.expected @@ -17,17 +17,17 @@ people_mentions(sentence_id text, text text, mention_id text). -has_spouse(person1_id text, - person2_id text, - sentence_id text, - description text, - relation_id text, - is_true boolean). +has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). has_spouse_features(relation_id text, feature text). -f_has_spouse?(relation_id text). +has_spouse?(relation_id text). people_mentions :- !ext_people(ext_people_input). @@ -44,7 +44,7 @@ function ext_people implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. -has_spouse :- !ext_has_spouse(ext_has_spouse_input). +has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input). ext_has_spouse_input(sentence_id text, p1_id text, @@ -58,7 +58,7 @@ ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like ext_has_spouse_input - returns like has_spouse + returns like has_spouse_candidates implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. @@ -73,7 +73,7 @@ ext_has_spouse_features_input(words text[], ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse(person1_id, person2_id, s, h, rid, x), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). @@ -83,8 +83,8 @@ function ext_has_spouse_features implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. -f_has_spouse(rid) :- - has_spouse(a, b, c, d, rid, l), +has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f label = l. 
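The renames in this patch ripple through the .ddl, .compile.expected, and .print.expected files together because the compiler derives every block identifier for an inference rule from the rule's head-relation name (see resolveExtractorBlockName in PATCH 067 above, which emits extraction_rule_${s.q.head.name}, and the factor_* blocks in the compiled output). Here is a minimal, self-contained Scala sketch of that naming dependency; the object name, the derivedNames helper, and its simplified signature are illustrative assumptions, not compiler code.

object BlockNames extends App {
  // Sketch of the naming rule: the compiled extractor block, the factor block,
  // and the materialized variable table are all derived from the head name, so
  // renaming f_has_spouse to has_spouse renames all three at once.
  def derivedNames(head: String): List[String] =
    List(s"extraction_rule_$head", s"factor_$head", head)

  println(derivedNames("f_has_spouse")) // identifiers before this patch
  println(derivedNames("has_spouse"))   // identifiers after this patch
}

This is also why the candidate table had to move to has_spouse_candidates: once the variable relation takes the name has_spouse, keeping the candidate table under the same name would make the two relations collide.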
diff --git a/examples/spouse_incremental_example.compile.expected b/examples/spouse_incremental_example.compile.expected index 2e39ffc25..4e009898a 100644 --- a/examples/spouse_incremental_example.compile.expected +++ b/examples/spouse_incremental_example.compile.expected @@ -11,7 +11,7 @@ deepdive.schema.variables { - dd_delta_f_has_spouse.label: Boolean + dd_delta_has_spouse.label: Boolean } @@ -30,50 +30,50 @@ deepdive.extraction.extractors.extraction_rule_27 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse R1, people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, dd_delta_has_spouse R1, people_mentions R2, people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, dd_delta_has_spouse R1, people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS 
"people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, has_spouse R1, dd_delta_people_mentions R2, people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse R1, dd_delta_people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, dd_delta_has_spouse R1, dd_delta_people_mentions R2, people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS 
"people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, dd_delta_has_spouse R1, dd_delta_people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, has_spouse R1, people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse R1, people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , 
R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, dd_delta_has_spouse R1, people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, dd_delta_has_spouse R1, people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, has_spouse R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS 
"dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, dd_delta_has_spouse R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, dd_delta_has_spouse R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS 
"dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" @@ -122,24 +122,24 @@ deepdive.extraction.extractors.extraction_rule_17 { input: """ SELECT * FROM dd_delta_ext_has_spouse_input """ - output_relation: "dd_delta_has_spouse" + output_relation: "dd_delta_has_spouse_candidates" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" style: "tsv_extractor" dependencies: [ "extraction_rule_20" ] } - deepdive.extraction.extractors.extraction_rule_dd_delta_f_has_spouse { - sql: """ DROP TABLE IF EXISTS dd_delta_f_has_spouse; - CREATE TABLE dd_delta_f_has_spouse AS + deepdive.extraction.extractors.extraction_rule_dd_delta_has_spouse { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse; + CREATE TABLE dd_delta_has_spouse AS SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count - FROM dd_delta_has_spouse R0, has_spouse_features R1 + FROM dd_delta_has_spouse_candidates R0, has_spouse_features R1 WHERE R1.relation_id = R0.relation_id UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count - FROM has_spouse R0, dd_delta_has_spouse_features R1 + FROM has_spouse_candidates R0, dd_delta_has_spouse_features R1 WHERE R1.relation_id = R0.relation_id UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count - FROM dd_delta_has_spouse R0, dd_delta_has_spouse_features R1 + FROM dd_delta_has_spouse_candidates R0, dd_delta_has_spouse_features R1 WHERE R1.relation_id = R0.relation_id """ @@ -148,32 +148,32 @@ } - deepdive.inference.factors.factor_dd_delta_f_has_spouse { + deepdive.inference.factors.factor_dd_delta_has_spouse { input_query: """ - SELECT R0.id AS "dd_delta_f_has_spouse.R0.id" , R0.label AS "dd_delta_f_has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_f_has_spouse R0, dd_delta_has_spouse R1, has_spouse_features R2 + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R0.label AS "dd_delta_has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Imply(dd_delta_f_has_spouse.R0.label)" + function: "Imply(dd_delta_has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } - deepdive.inference.factors.factor_dd_delta_f_has_spouse { + deepdive.inference.factors.factor_dd_delta_has_spouse { input_query: """ - SELECT R0.id AS "dd_delta_f_has_spouse.R0.id" , R0.label AS "dd_delta_f_has_spouse.R0.label" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_f_has_spouse R0, has_spouse R1, dd_delta_has_spouse_features R2 + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , 
R0.label AS "dd_delta_has_spouse.R0.label" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Imply(dd_delta_f_has_spouse.R0.label)" + function: "Imply(dd_delta_has_spouse.R0.label)" weight: "?(dd_delta_has_spouse_features.R2.feature)" } - deepdive.inference.factors.factor_dd_delta_f_has_spouse { + deepdive.inference.factors.factor_dd_delta_has_spouse { input_query: """ - SELECT R0.id AS "dd_delta_f_has_spouse.R0.id" , R0.label AS "dd_delta_f_has_spouse.R0.label" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_f_has_spouse R0, dd_delta_has_spouse R1, dd_delta_has_spouse_features R2 + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R0.label AS "dd_delta_has_spouse.R0.label" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Imply(dd_delta_f_has_spouse.R0.label)" + function: "Imply(dd_delta_has_spouse.R0.label)" weight: "?(dd_delta_has_spouse_features.R2.feature)" } diff --git a/examples/spouse_incremental_example.ddl b/examples/spouse_incremental_example.ddl index 04501621e..cf467ac8c 100644 --- a/examples/spouse_incremental_example.ddl +++ b/examples/spouse_incremental_example.ddl @@ -23,7 +23,7 @@ people_mentions( mention_id text, dd_count int). -has_spouse( +has_spouse_candidates( person1_id text, person2_id text, sentence_id text, @@ -37,7 +37,7 @@ has_spouse_features( feature text, dd_count int). -f_has_spouse?(relation_id text). +has_spouse?(relation_id text). people_mentions :- !ext_people(ext_people_input). @@ -55,7 +55,7 @@ function ext_people over like ext_people_input returns like people_mentions implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. -has_spouse :- +has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input). ext_has_spouse_input( @@ -71,7 +71,7 @@ ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- people_mentions(s, c, d, p2_text, p2_id). function ext_has_spouse over like ext_has_spouse_input - returns like has_spouse + returns like has_spouse_candidates implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. has_spouse_features :- @@ -88,7 +88,7 @@ ext_has_spouse_features_input( ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse(person1_id, person2_id, s, h, rid, x), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). @@ -96,8 +96,8 @@ function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. -f_has_spouse(rid) :- - has_spouse(a, b, c, d, rid, l), +has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f label = l. 
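The sheer size of the expected output above comes from the delta-derivation scheme in use at this point in the series: a derived rule with n body atoms is rewritten into one variant per non-empty subset of atoms replaced by their dd_delta_ counterparts, i.e. 2^n - 1 variants. That is exactly the 15 UNION branches of dd_delta_ext_has_spouse_features_input (n = 4) in the compiled view above, and the 15 derived rules in the print output below. A minimal sketch of that expansion, with plain strings standing in for the parser's Atom type:

object DeltaVariants extends App {
  // Replace every non-empty subset of body atoms with its dd_delta_ version;
  // bit j of the mask decides whether atom j is drawn from the delta relation,
  // mirroring the (i & (1 << j)) test in the deriver code later in this series.
  def variants(body: List[String]): List[List[String]] =
    (1 until (1 << body.length)).toList map { mask =>
      body.zipWithIndex map { case (atom, j) =>
        if ((mask & (1 << j)) == 0) atom else "dd_delta_" + atom
      }
    }

  val body = List("sentences", "has_spouse_candidates",
                  "people_mentions", "people_mentions")
  println(variants(body).length) // 15, matching the UNION branches above
}

PATCH 072 later in this series replaces this exponential scheme with a linear one (exactly one dd_delta_ atom per derived rule, with earlier atoms drawn from dd_new_ tables), bringing the count down from 2^n - 1 to n.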
diff --git a/examples/spouse_incremental_example.print.expected b/examples/spouse_incremental_example.print.expected index fb6e1e3d8..406f28b4a 100644 --- a/examples/spouse_incremental_example.print.expected +++ b/examples/spouse_incremental_example.print.expected @@ -1,8 +1,10 @@ articles(article_id text, - text text). + text text, + dd_count int). dd_delta_articles(article_id text, - text text). + text text, + dd_count int). sentences(document_id text, sentence text, @@ -12,7 +14,8 @@ sentences(document_id text, dependencies text[], ner_tags text[], sentence_offset int, - sentence_id text). + sentence_id text, + dd_count int). dd_delta_sentences(document_id text, sentence text, @@ -22,53 +25,62 @@ dd_delta_sentences(document_id text, dependencies text[], ner_tags text[], sentence_offset int, - sentence_id text). + sentence_id text, + dd_count int). people_mentions(sentence_id text, start_position int, length int, text text, - mention_id text). + mention_id text, + dd_count int). dd_delta_people_mentions(sentence_id text, start_position int, length int, text text, - mention_id text). - -has_spouse(person1_id text, - person2_id text, - sentence_id text, - description text, - relation_id text, - is_true boolean). - -dd_delta_has_spouse(person1_id text, - person2_id text, - sentence_id text, - description text, - relation_id text, - is_true boolean). + mention_id text, + dd_count int). + +has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean, + dd_count int). + +dd_delta_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean, + dd_count int). has_spouse_features(relation_id text, - feature text). + feature text, + dd_count int). dd_delta_has_spouse_features(relation_id text, - feature text). + feature text, + dd_count int). -f_has_spouse?(relation_id text). +has_spouse?(relation_id text). -dd_delta_f_has_spouse?(relation_id text). +dd_delta_has_spouse?(relation_id text). dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). ext_people_input(sentence_id text, words text[], - ner_tags text[]). + ner_tags text[], + dd_count int). dd_delta_ext_people_input(sentence_id text, words text[], - ner_tags text[]). + ner_tags text[], + dd_count int). dd_delta_ext_people_input(s, words, ner_tags) :- dd_delta_sentences(a, b, words, c, d, e, ner_tags, f, s). @@ -79,19 +91,21 @@ function ext_people implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. -dd_delta_has_spouse :- !ext_has_spouse(dd_delta_ext_has_spouse_input). +dd_delta_has_spouse_candidates :- !ext_has_spouse(dd_delta_ext_has_spouse_input). ext_has_spouse_input(sentence_id text, p1_id text, p1_text text, p2_id text, - p2_text text). + p2_text text, + dd_count int). dd_delta_ext_has_spouse_input(sentence_id text, p1_id text, p1_text text, p2_id text, - p2_text text). + p2_text text, + dd_count int). dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- dd_delta_people_mentions(s, a, b, p1_text, p1_id), @@ -107,7 +121,7 @@ dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like dd_delta_ext_has_spouse_input - returns like dd_delta_has_spouse + returns like dd_delta_has_spouse_candidates implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. 
@@ -118,102 +132,104 @@ ext_has_spouse_features_input(words text[], p1_start_position int, p1_length int, p2_start_position int, - p2_length int). + p2_length int, + dd_count int). dd_delta_ext_has_spouse_features_input(words text[], relation_id text, p1_start_position int, p1_length int, p2_start_position int, - p2_length int). + p2_length int, + dd_count int). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - has_spouse(person1_id, person2_id, s, h, rid, x), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse(person1_id, person2_id, s, h, rid, x), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - has_spouse(person1_id, person2_id, s, h, rid, x), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse(person1_id, person2_id, s, h, rid, x), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). 
dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - has_spouse(person1_id, person2_id, s, h, rid, x), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - has_spouse(person1_id, person2_id, s, h, rid, x), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - has_spouse(person1_id, person2_id, s, h, rid, x), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse(person1_id, person2_id, s, h, rid, x), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). @@ -223,18 +239,18 @@ function ext_has_spouse_features implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. -dd_delta_f_has_spouse(rid) :- - dd_delta_has_spouse(a, b, c, d, rid, l), +dd_delta_has_spouse(rid) :- + dd_delta_has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f label = l. -dd_delta_f_has_spouse(rid) :- - has_spouse(a, b, c, d, rid, l), +dd_delta_has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), dd_delta_has_spouse_features(rid, f) weight = f label = l. -dd_delta_f_has_spouse(rid) :- - dd_delta_has_spouse(a, b, c, d, rid, l), +dd_delta_has_spouse(rid) :- + dd_delta_has_spouse_candidates(a, b, c, d, rid, l), dd_delta_has_spouse_features(rid, f) weight = f label = l. 
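One more convention worth noting before the next patch: in incremental mode every relation carries a dd_count multiplicity column, and wherever a rule joins several body atoms the compiler selects the product of the per-alias counts, which is the repeated R0.dd_count * R1.dd_count * ... AS "dd_count" expression throughout the expected output above. A hedged sketch of that string construction follows; the standalone helper and its signature are mine, while the compiler builds the same product inline from stmt.q.body (see the ddCount lines in the Scala diffs below).

object DdCountExpr extends App {
  // One factor per body-atom alias R0..R(n-1); empty when not incremental.
  def ddCountExpr(bodySize: Int, isIncremental: Boolean): String =
    if (!isIncremental) ""
    else (0 until bodySize).map(i => s"R$i.dd_count").mkString(" * ")

  println(ddCountExpr(4, isIncremental = true))
  // prints: R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count
}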
From 1e65dbd11b8f70cd4ae6c34279958e08de0f1bf3 Mon Sep 17 00:00:00 2001 From: senwu Date: Tue, 12 May 2015 03:23:45 -0700 Subject: [PATCH 069/347] fix typos --- DeepDiveLogCompiler.scala | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index fd41dad06..8c592d31a 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -78,7 +78,7 @@ class CompilationState( statements : DeepDiveLog.Program ) { var dependencies : Map[Statement, Set[Statement]] = new HashMap() // The statement whether will compile or union to other statements - var visable : Set[Statement] = Set() + var visible : Set[Statement] = Set() def init() = { // generate the statements. @@ -95,7 +95,7 @@ class CompilationState( statements : DeepDiveLog.Program ) { case FunctionCallRule(_,_,_) => () } - analyzeVisable(statements) + analyzeVisible(statements) analyzeDependency(statements) } @@ -178,7 +178,7 @@ class CompilationState( statements : DeepDiveLog.Program ) { } // Analyze the block visibility among statements - def analyzeVisable(statements: List[Statement]) = { + def analyzeVisible(statements: List[Statement]) = { val extractionRules = new ListBuffer[ExtractionRule]() val functionCallRules = new ListBuffer[FunctionCallRule]() val inferenceRules = new ListBuffer[InferenceRule]() @@ -194,9 +194,9 @@ class CompilationState( statements : DeepDiveLog.Program ) { val functionCallRulesGroup = functionCallRules.groupBy(_.input) val inferenceRulesGroup = inferenceRules.groupBy(_.q.head.name) - extractionRulesGroup foreach {keyVal => visable += keyVal._2(0)} - functionCallRulesGroup foreach {keyVal => visable += keyVal._2(0)} - inferenceRulesGroup foreach {keyVal => visable += keyVal._2(0)} + extractionRulesGroup foreach {keyVal => visible += keyVal._2(0)} + functionCallRulesGroup foreach {keyVal => visible += keyVal._2(0)} + inferenceRulesGroup foreach {keyVal => visible += keyVal._2(0)} } // Analyze the dependency between statements and construct a graph. @@ -237,7 +237,7 @@ class CompilationState( statements : DeepDiveLog.Program ) { def generateDependenciesOfCompiledBlockFor(statements: List[Statement]): String = { var dependentExtractorBlockNames = Set[String]() for (statement <- statements) { - dependentExtractorBlockNames ++= ((dependencies getOrElse (statement, Set())) & visable) map resolveExtractorBlockName + dependentExtractorBlockNames ++= ((dependencies getOrElse (statement, Set())) & visible) map resolveExtractorBlockName } if (dependentExtractorBlockNames.size == 0) "" else { val depStr = dependentExtractorBlockNames map {" \"" + _ + "\" "} mkString(", ") From da69fac695bf910a4b60781f9d687ac439fc5468 Mon Sep 17 00:00:00 2001 From: senwu Date: Tue, 12 May 2015 04:19:11 -0700 Subject: [PATCH 070/347] address all Jaeho's comments --- DeepDiveLogCompiler.scala | 127 ++++++++++++++------------------ 1 file changed, 46 insertions(+), 81 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 8c592d31a..eddc6b10d 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -63,7 +63,7 @@ import scala.collection.mutable.ListBuffer // This handles the schema statements. // It can tell you if a predicate is a "query" predicate or a "ground prediate" // and it resolves Variables their correct and true name in the schema, i.e. R(x,y) then x could be Attribute1 declared.
-class CompilationState( statements : DeepDiveLog.Program ) { +class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.Config ) { // TODO: refactor the schema into a class that constructs and // manages these maps. Also it should have appropriate // abstractions and error handling for missing values. @@ -80,8 +80,17 @@ class CompilationState( statements : DeepDiveLog.Program ) { // The statement whether will compile or union to other statements var visible : Set[Statement] = Set() + var isIncremental : Boolean = false + + // Mapping head names to the actual statements + var extractionRuleGroupByHead : Map[String, List[ExtractionRule]] = new HashMap[String, List[ExtractionRule]]() + var inferenceRuleGroupByHead : Map[String, List[InferenceRule]] = new HashMap[String, List[InferenceRule]]() + var functionCallRuleGroupByInput : Map[String, List[FunctionCallRule]] = new HashMap[String, List[FunctionCallRule]]() + var functionCallRuleGroupByOutput : Map[String, List[FunctionCallRule]] = new HashMap[String, List[FunctionCallRule]]() + def init() = { // generate the statements. + isIncremental = config.isIncremental statements.foreach { case SchemaDeclaration(Attribute(r, terms, types), isQuery) => terms.foreach { @@ -94,7 +103,7 @@ class CompilationState( statements : DeepDiveLog.Program ) { case fdecl : FunctionDeclaration => function_schema += {fdecl.functionName -> fdecl} case FunctionCallRule(_,_,_) => () } - + groupByHead(statements) analyzeVisible(statements) analyzeDependency(statements) } @@ -136,7 +145,7 @@ class CompilationState( statements : DeepDiveLog.Program ) { if( ground_relations contains relName ) !ground_relations(relName) else true } - // resolve a column name with alias + // Resolve a column name with alias def resolveColumn(s: String, qs: QuerySchema, q : ConjunctiveQuery, alias: Boolean) : Option[String] = { val index = qs.getBodyIndex(s) val name = resolveName(qs.getVar(s)) @@ -177,55 +186,37 @@ class CompilationState( statements : DeepDiveLog.Program ) { ${ whereClauseStr }""" } - // Analyze the block visibility among statements - def analyzeVisible(statements: List[Statement]) = { - val extractionRules = new ListBuffer[ExtractionRule]() - val functionCallRules = new ListBuffer[FunctionCallRule]() - val inferenceRules = new ListBuffer[InferenceRule]() - + // Group statements by head + def groupByHead(statements: List[Statement]) = { + // Compile compilation states by head name based on type + val extractionRuleToCompile = new ListBuffer[ExtractionRule]() + val inferenceRuleToCompile = new ListBuffer[InferenceRule]() + val functionCallRuleToCompile = new ListBuffer[FunctionCallRule]() statements foreach (_ match { - case s: ExtractionRule => extractionRules += s - case s: FunctionCallRule => functionCallRules += s - case s: InferenceRule => inferenceRules += s + case s: ExtractionRule => extractionRuleToCompile += s + case s: FunctionCallRule => functionCallRuleToCompile += s + case s: InferenceRule => inferenceRuleToCompile += s case _ => }) - val extractionRulesGroup = extractionRules.groupBy(_.q.head.name) - val functionCallRulesGroup = functionCallRules.groupBy(_.input) - val inferenceRulesGroup = inferenceRules.groupBy(_.q.head.name) + extractionRuleGroupByHead = extractionRuleToCompile.toList.groupBy(_.q.head.name) + inferenceRuleGroupByHead = inferenceRuleToCompile.toList.groupBy(_.q.head.name) + functionCallRuleGroupByInput = functionCallRuleToCompile.toList.groupBy(_.input) + functionCallRuleGroupByOutput = 
functionCallRuleToCompile.toList.groupBy(_.output) + } - extractionRulesGroup foreach {keyVal => visible += keyVal._2(0)} - functionCallRulesGroup foreach {keyVal => visible += keyVal._2(0)} - inferenceRulesGroup foreach {keyVal => visible += keyVal._2(0)} + // Analyze the block visibility among statements + def analyzeVisible(statements: List[Statement]) = { + extractionRuleGroupByHead foreach {keyVal => visible += keyVal._2(0)} + functionCallRuleGroupByInput foreach {keyVal => visible += keyVal._2(0)} + inferenceRuleGroupByHead foreach {keyVal => visible += keyVal._2(0)} } // Analyze the dependency between statements and construct a graph. def analyzeDependency(statements: List[Statement]) = { - // first map head names to the actual statement - val extractionRuleByHeadName = new ListBuffer[ExtractionRule]() - val inferenceRuleByHeadName = new ListBuffer[InferenceRule]() - val functionCallRuleByHeadName = new ListBuffer[FunctionCallRule]() - - statements foreach { - case s: ExtractionRule => extractionRuleByHeadName += s - case s: FunctionCallRule => functionCallRuleByHeadName += s - case s: InferenceRule => inferenceRuleByHeadName += s - case _ => - } + var stmtByHeadName = (extractionRuleGroupByHead.toSeq ++ inferenceRuleGroupByHead.toSeq ++ functionCallRuleGroupByOutput.toSeq).groupBy(_._1).mapValues(_.map(_._2).toList) - val eByHeadNameGroup = extractionRuleByHeadName.toList.groupBy(_.q.head.name) - val iByHeadNameGroup = inferenceRuleByHeadName.toList.groupBy(_.q.head.name) - val fByHeadNameGroup = functionCallRuleByHeadName.toList.groupBy(_.output) - - var stmtByHeadName = (eByHeadNameGroup.toSeq ++ iByHeadNameGroup.toSeq ++ fByHeadNameGroup.toSeq).groupBy(_._1).mapValues(_.map(_._2).toList) - // var stmtByHeadName = new HashMap[String, Statement]() - // statements foreach { - // case e : ExtractionRule => stmtByHeadName += { e.q.head.name -> e } - // case f : FunctionCallRule => stmtByHeadName += { f.output -> f } - // case w : InferenceRule => stmtByHeadName += { w.q.head.name -> w } - // case _ => - // } - // then, look at the body of each statement to construct a dependency graph + // Look at the body of each statement to construct a dependency graph statements foreach { case f : FunctionCallRule => dependencies += { f -> (( Some(f.input) flatMap (stmtByHeadName get _)).toSet.flatten.flatten) } case e : ExtractionRule => dependencies += { e -> ((e.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet.flatten.flatten) } @@ -280,16 +271,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { type CompiledBlock = String type CompiledBlocks = List[CompiledBlock] - // Dispatch to the corresponding compile function - // def compile(stmts: List[Statement], ss: CompilationState, isIncremental: Boolean): CompiledBlocks = stmts(0) match { - // case s: ExtractionRule => compileE(stmts, ss, isIncremental) - // case s: FunctionCallRule => compileF(stmts, ss, isIncremental) - // case s: InferenceRule => compileI(stmts, ss, isIncremental) - // case _ => List() // defaults to compiling into empty block - // } - // Generate extraction rule part for deepdive - def compileE(stmts: List[ExtractionRule], ss: CompilationState, isIncremental: Boolean): CompiledBlocks = { + def compileExtractionRules(stmts: List[ExtractionRule], ss: CompilationState): CompiledBlocks = { var inputQueries = new ListBuffer[String]() for (stmt <- stmts) { // Generate the body of the query. 
@@ -299,11 +282,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, stmt.q, true) } - val variableColsStr = if (variableCols.length > 0) Some(variableCols.mkString(", ")) else None - - val selectStr = (List(variableColsStr) flatMap (u => u)).mkString(", ") - - val ddCount = if (isIncremental) ( stmt.q.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val selectStr = variableCols.mkString(", ") + val ddCount = if (ss.isIncremental) ( stmt.q.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" inputQueries += s""" @@ -324,7 +304,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { List(extractor) } - def compileF(stmts: List[FunctionCallRule], ss: CompilationState, isIncremental: Boolean): CompiledBlocks = { + def compileFunctionCallRules(stmts: List[FunctionCallRule], ss: CompilationState): CompiledBlocks = { var extractors = new ListBuffer[String]() for (stmt <- stmts) { val inputQuery = s""" @@ -358,7 +338,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } // generate inference rule part for deepdive - def compileI(stmts: List[InferenceRule], ss: CompilationState, isIncremental: Boolean): CompiledBlocks = { + def compileInferenceRules(stmts: List[InferenceRule], ss: CompilationState): CompiledBlocks = { var blocks = List[String]() val qs = new QuerySchema( stmts(0).q ) // node query @@ -373,7 +353,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val name = ss.resolveName(qs.getVar(z.supervision)) val labelCol = s"R${index}.${name}" val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") - val ddCount = if (isIncremental) ( z.q.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCount = if (ss.isIncremental) ( z.q.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else "" inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr} @@ -396,7 +376,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { if (ss.isQueryTerm(stmts(0).q.head.name)) blocks :::= compileNodeRule(stmts, qs, ss) - val inferenceRuleToCompileGroup = stmts.groupBy(_.q.head.name) + val inferenceRuleGroupByHead = stmts.groupBy(_.q.head.name) for (stmt <- stmts) { // edge query @@ -414,9 +394,9 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, true)).mkString(", ")) } - val selectStr = (List(variableIdsStr, variableColsStr, uwStr) flatMap (u => u)).mkString(", ") + val selectStr = (List(variableIdsStr, variableColsStr, uwStr) flatten).mkString(", ") - val ddCount = if (isIncremental) ( fakeCQ.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCount = if (ss.isIncremental) ( fakeCQ.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" // factor input query @@ -489,27 +469,12 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { else parsedProgram // take an initial pass to analyze the parsed program - val state = new CompilationState( programToCompile ) - - // compile compilation states by head name based on type - val extractionRuleToCompile = new 
ListBuffer[ExtractionRule]() - val inferenceRuleToCompile = new ListBuffer[InferenceRule]() - val functionCallRuleToCompile = new ListBuffer[FunctionCallRule]() - programToCompile foreach (_ match { - case s: ExtractionRule => extractionRuleToCompile += s - case s: FunctionCallRule => functionCallRuleToCompile += s - case s: InferenceRule => inferenceRuleToCompile += s - case _ => - }) - - val extractionRuleToCompileGroup = extractionRuleToCompile.groupBy(_.q.head.name) - val inferenceRuleToCompileGroup = inferenceRuleToCompile.groupBy(_.q.head.name) - val functionCallRuleToCompileGroup = functionCallRuleToCompile.groupBy(_.input) + val state = new CompilationState( programToCompile, config ) val body = new ListBuffer[String]() - extractionRuleToCompileGroup foreach {keyVal => body ++= compileE(keyVal._2.toList, state, config.isIncremental)} - functionCallRuleToCompileGroup foreach {keyVal => body ++= compileF(keyVal._2.toList, state, config.isIncremental)} - inferenceRuleToCompileGroup foreach {keyVal => body ++= compileI(keyVal._2.toList, state, config.isIncremental)} + state.extractionRuleGroupByHead foreach {keyVal => body ++= compileExtractionRules(keyVal._2, state)} + state.functionCallRuleGroupByInput foreach {keyVal => body ++= compileFunctionCallRules(keyVal._2, state)} + state.inferenceRuleGroupByHead foreach {keyVal => body ++= compileInferenceRules(keyVal._2, state)} // compile the program into blocks of application.conf val blocks = ( From f7e7f712de07d7eb280dd2da54ad9debca5136f0 Mon Sep 17 00:00:00 2001 From: senwu Date: Fri, 15 May 2015 01:50:00 -0700 Subject: [PATCH 071/347] remove variable column string to make the head body contain only unique keys --- DeepDiveLogCompiler.scala | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index eddc6b10d..13f08c214 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -386,7 +386,6 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val index = stmt.q.body.length + 1 val qs2 = new QuerySchema( fakeCQ ) val variableIdsStr = Some(s"""R0.id AS "${stmt.q.head.name}.R0.id" """) - val variableColsStr = Some(s"""R0.label AS "${stmt.q.head.name}.R0.label" """) // weight string val uwStr = stmt.weights match { @@ -394,7 +393,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, true)).mkString(", ")) } - val selectStr = (List(variableIdsStr, variableColsStr, uwStr) flatten).mkString(", ") + val selectStr = (List(variableIdsStr, uwStr) flatten).mkString(", ") val ddCount = if (ss.isIncremental) ( fakeCQ.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" From 8bdafbadf14490155bc492b1ab6bbdc7e90a49b5 Mon Sep 17 00:00:00 2001 From: senwu Date: Sat, 16 May 2015 00:10:36 -0700 Subject: [PATCH 072/347] add dd_new table in delta deriver --- DeepDiveLogDeltaDeriver.scala | 75 ++++++++++++++++++++++++++--------- 1 file changed, 56 insertions(+), 19 deletions(-) diff --git a/DeepDiveLogDeltaDeriver.scala b/DeepDiveLogDeltaDeriver.scala index 51f4242f1..336a6429d 100644 --- a/DeepDiveLogDeltaDeriver.scala +++ b/DeepDiveLogDeltaDeriver.scala @@ -4,6 +4,7 @@ object DeepDiveLogDeltaDeriver{ // Default prefix for incremental tables val deltaPrefix = "dd_delta_" + val newPrefix = "dd_new_" def transfer(stmt: Statement): List[Statement] = stmt match { case s:
SchemaDeclaration => transfer(s) @@ -17,12 +18,26 @@ object DeepDiveLogDeltaDeriver{ // keep the original scheme and create one delta scheme def transfer(stmt: SchemaDeclaration): List[Statement] = { var incrementalStatement = new ListBuffer[Statement]() + // Origin table incrementalStatement += stmt - var newTerms = new ListBuffer[Variable]() + // Delta table + var deltaTerms = new ListBuffer[Variable]() + for (term <- stmt.a.terms) { + deltaTerms += Variable(term.varName, deltaPrefix + term.relName, term.index) + } + var deltaStmt = SchemaDeclaration(Attribute(deltaPrefix + stmt.a.name, deltaTerms.toList, stmt.a.types), stmt.isQuery) + incrementalStatement += deltaStmt + // New table + val newTerms = new ListBuffer[Variable]() for (term <- stmt.a.terms) { - newTerms += Variable(term.varName, deltaPrefix + term.relName, term.index) + newTerms += Variable(term.varName, newPrefix + term.relName, term.index) + } + var newStmt = SchemaDeclaration(Attribute(newPrefix + stmt.a.name, newTerms.toList, stmt.a.types), stmt.isQuery) + incrementalStatement += newStmt + if (!stmt.isQuery) { + incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(newStmt.a.name, newStmt.a.terms.toList), List(Atom(stmt.a.name, stmt.a.terms.toList)))) + incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(newStmt.a.name, newStmt.a.terms.toList), List(Atom(deltaStmt.a.name, deltaStmt.a.terms.toList)))) } - incrementalStatement += SchemaDeclaration(Attribute(deltaPrefix + stmt.a.name, newTerms.toList, stmt.a.types), stmt.isQuery) incrementalStatement.toList } @@ -64,26 +79,37 @@ object DeepDiveLogDeltaDeriver{ newStmtCqHeadTerms += Variable(headTerm.varName, deltaPrefix + headTerm.relName, headTerm.index) } var newStmtCqHead = Atom(deltaPrefix + stmt.q.head.name, newStmtCqHeadTerms.toList) - - var deltaStmtCqBody = new ListBuffer[Atom]() + // dd delta table from dd_delta_ table + var ddDeltaStmtCqBody = new ListBuffer[Atom]() for (stmtCqBody <- stmt.q.body) { // List[Atom] var stmtCqBodyTerms = new ListBuffer[Variable]() for (bodyTerm <- stmtCqBody.terms) { stmtCqBodyTerms += Variable(bodyTerm.varName, deltaPrefix + bodyTerm.relName, bodyTerm.index) } - deltaStmtCqBody += Atom(deltaPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) + ddDeltaStmtCqBody += Atom(deltaPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) + } + // dd new body from dd_new_ table + var ddNewStmtCqBody = new ListBuffer[Atom]() + for (stmtCqBody <- stmt.q.body) { // List[Atom] + var stmtCqBodyTerms = new ListBuffer[Variable]() + for (bodyTerm <- stmtCqBody.terms) { + stmtCqBodyTerms += Variable(bodyTerm.varName, newPrefix + bodyTerm.relName, bodyTerm.index) + } + ddNewStmtCqBody += Atom(newPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) } - // New body + // New statement var i = 0 var j = 0 - for (i <- 1 to ((1 << stmt.q.body.length) - 1)) { + for (i <- 0 to (stmt.q.body.length - 1)) { var newStmtCqBody = new ListBuffer[Atom]() for (j <- 0 to (stmt.q.body.length - 1)) { - if ((i & (1 << j)) == 0) + if (j > i) newStmtCqBody += stmt.q.body(j) - else - newStmtCqBody += deltaStmtCqBody(j) + else if (j < i) + newStmtCqBody += ddNewStmtCqBody(j) + else if (j == i) + newStmtCqBody += ddDeltaStmtCqBody(j) } incrementalStatement += ExtractionRule(ConjunctiveQuery(newStmtCqHead, newStmtCqBody.toList)) } @@ -109,26 +135,37 @@ object DeepDiveLogDeltaDeriver{ newStmtCqHeadTerms += Variable(headTerm.varName, deltaPrefix + headTerm.relName, headTerm.index) } var newStmtCqHead = Atom(deltaPrefix + stmt.q.head.name, newStmtCqHeadTerms.toList) - - var 
deltaStmtCqBody = new ListBuffer[Atom]() + // dd delta table from dd_delta_ table + var ddDeltaStmtCqBody = new ListBuffer[Atom]() for (stmtCqBody <- stmt.q.body) { // List[Atom] var stmtCqBodyTerms = new ListBuffer[Variable]() for (bodyTerm <- stmtCqBody.terms) { stmtCqBodyTerms += Variable(bodyTerm.varName, deltaPrefix + bodyTerm.relName, bodyTerm.index) } - deltaStmtCqBody += Atom(deltaPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) + ddDeltaStmtCqBody += Atom(deltaPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) + } + // dd new body from dd_new_ table + var ddNewStmtCqBody = new ListBuffer[Atom]() + for (stmtCqBody <- stmt.q.body) { // List[Atom] + var stmtCqBodyTerms = new ListBuffer[Variable]() + for (bodyTerm <- stmtCqBody.terms) { + stmtCqBodyTerms += Variable(bodyTerm.varName, newPrefix + bodyTerm.relName, bodyTerm.index) + } + ddNewStmtCqBody += Atom(newPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) } - // New body + // New statement var i = 0 var j = 0 - for (i <- 1 to ((1 << stmt.q.body.length) - 1)) { + for (i <- 0 to (stmt.q.body.length - 1)) { var newStmtCqBody = new ListBuffer[Atom]() for (j <- 0 to (stmt.q.body.length - 1)) { - if ((i & (1 << j)) == 0) + if (j > i) newStmtCqBody += stmt.q.body(j) - else - newStmtCqBody += deltaStmtCqBody(j) + else if (j < i) + newStmtCqBody += ddNewStmtCqBody(j) + else if (j == i) + newStmtCqBody += ddDeltaStmtCqBody(j) } incrementalStatement += InferenceRule(ConjunctiveQuery(newStmtCqHead, newStmtCqBody.toList), stmt.weights, stmt.supervision) } From 2dafd684b1a399133ed41e449648a85a8becaf96 Mon Sep 17 00:00:00 2001 From: senwu Date: Sat, 16 May 2015 06:15:06 -0700 Subject: [PATCH 073/347] rename funciton to transform --- DeepDiveLogDeltaDeriver.scala | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/DeepDiveLogDeltaDeriver.scala b/DeepDiveLogDeltaDeriver.scala index 336a6429d..bc79f1c11 100644 --- a/DeepDiveLogDeltaDeriver.scala +++ b/DeepDiveLogDeltaDeriver.scala @@ -6,17 +6,17 @@ object DeepDiveLogDeltaDeriver{ val deltaPrefix = "dd_delta_" val newPrefix = "dd_new_" - def transfer(stmt: Statement): List[Statement] = stmt match { - case s: SchemaDeclaration => transfer(s) - case s: FunctionDeclaration => transfer(s) - case s: ExtractionRule => transfer(s) - case s: FunctionCallRule => transfer(s) - case s: InferenceRule => transfer(s) + def transform(stmt: Statement): List[Statement] = stmt match { + case s: SchemaDeclaration => transform(s) + case s: FunctionDeclaration => transform(s) + case s: ExtractionRule => transform(s) + case s: FunctionCallRule => transform(s) + case s: InferenceRule => transform(s) } // Incremental scheme declaration, // keep the original scheme and create one delta scheme - def transfer(stmt: SchemaDeclaration): List[Statement] = { + def transform(stmt: SchemaDeclaration): List[Statement] = { var incrementalStatement = new ListBuffer[Statement]() // Origin table incrementalStatement += stmt @@ -26,6 +26,7 @@ object DeepDiveLogDeltaDeriver{ deltaTerms += Variable(term.varName, deltaPrefix + term.relName, term.index) } var deltaStmt = SchemaDeclaration(Attribute(deltaPrefix + stmt.a.name, deltaTerms.toList, stmt.a.types), stmt.isQuery) + incrementalStatement += deltaStmt // New table val newTerms = new ListBuffer[Variable]() @@ -43,7 +44,7 @@ object DeepDiveLogDeltaDeriver{ // Incremental function declaration, // create one delta function scheme based on original function scheme - def transfer(stmt: FunctionDeclaration): List[Statement] = { + def transform(stmt: 
FunctionDeclaration): List[Statement] = { var incrementalStatement = new ListBuffer[Statement]() var newTerms = new ListBuffer[Variable]() var newInputType: RelationType = stmt.inputType match { @@ -70,7 +71,7 @@ object DeepDiveLogDeltaDeriver{ // Incremental extraction rule, // create delta rules based on original extraction rule - def transfer(stmt: ExtractionRule): List[Statement] = { + def transform(stmt: ExtractionRule): List[Statement] = { var incrementalStatement = new ListBuffer[Statement]() // New head @@ -118,7 +119,7 @@ object DeepDiveLogDeltaDeriver{ // Incremental function call rule, // modify function input and output - def transfer(stmt: FunctionCallRule): List[Statement] = { + def transform(stmt: FunctionCallRule): List[Statement] = { var incrementalStatement = new ListBuffer[Statement]() incrementalStatement += FunctionCallRule(deltaPrefix + stmt.input, deltaPrefix + stmt.output, stmt.function) incrementalStatement.toList @@ -126,7 +127,7 @@ object DeepDiveLogDeltaDeriver{ // Incremental inference rule, // create delta rules based on original extraction rule - def transfer(stmt: InferenceRule): List[Statement] = { + def transform(stmt: InferenceRule): List[Statement] = { var incrementalStatement = new ListBuffer[Statement]() // New head @@ -175,7 +176,7 @@ object DeepDiveLogDeltaDeriver{ def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = { var incrementalProgram = new ListBuffer[Statement]() for (x <- program) { - incrementalProgram = incrementalProgram ++ transfer(x) + incrementalProgram = incrementalProgram ++ transform(x) } incrementalProgram.toList } From 51075ea272a8478eb74bfb501d148426047b0ca9 Mon Sep 17 00:00:00 2001 From: senwu Date: Sat, 16 May 2015 06:15:42 -0700 Subject: [PATCH 074/347] update new example output --- ...pouse_incremental_example.compile.expected | 233 ++++++++++++------ .../spouse_incremental_example.print.expected | 193 +++++++++------ 2 files changed, 263 insertions(+), 163 deletions(-) diff --git a/examples/spouse_incremental_example.compile.expected b/examples/spouse_incremental_example.compile.expected index 4e009898a..d376e5097 100644 --- a/examples/spouse_incremental_example.compile.expected +++ b/examples/spouse_incremental_example.compile.expected @@ -15,7 +15,73 @@ } - deepdive.extraction.extractors.extraction_rule_15 { + deepdive.extraction.extractors.extraction_rule_51 { + sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; + CREATE VIEW dd_delta_ext_has_spouse_features_input AS + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS 
"dd_count" + FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_28" , "extraction_rule_8" , "extraction_rule_36" , "extraction_rule_13" , "extraction_rule_18" ] + } + + + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP VIEW IF EXISTS dd_new_people_mentions; + CREATE VIEW dd_new_people_mentions AS + SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.start_position AS "people_mentions.R0.start_position" , R0.length AS "people_mentions.R0.length" , R0.text AS "people_mentions.R0.text" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.dd_count AS "people_mentions.R0.dd_count" , R0.dd_count AS "dd_count" + FROM people_mentions R0 + UNION + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.start_position AS "dd_delta_people_mentions.R0.start_position" , R0.length AS "dd_delta_people_mentions.R0.length" , R0.text AS "dd_delta_people_mentions.R0.text" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.dd_count AS "dd_delta_people_mentions.R0.dd_count" , R0.dd_count AS "dd_count" + FROM dd_delta_people_mentions R0 + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_28" ] + } + + + deepdive.extraction.extractors.extraction_rule_32 { + sql: """ DROP VIEW IF EXISTS dd_new_ext_people_input; + CREATE VIEW dd_new_ext_people_input AS + SELECT R0.sentence_id AS "ext_people_input.R0.sentence_id" , R0.words AS "ext_people_input.R0.words" , R0.ner_tags AS "ext_people_input.R0.ner_tags" , R0.dd_count AS "ext_people_input.R0.dd_count" , R0.dd_count AS "dd_count" + FROM ext_people_input R0 + UNION + SELECT R0.sentence_id AS "dd_delta_ext_people_input.R0.sentence_id" , R0.words AS "dd_delta_ext_people_input.R0.words" , R0.ner_tags AS "dd_delta_ext_people_input.R0.ner_tags" , R0.dd_count AS "dd_delta_ext_people_input.R0.dd_count" , 
R0.dd_count AS "dd_count" + FROM dd_delta_ext_people_input R0 + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_34" ] + } + + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP VIEW IF EXISTS dd_new_articles; + CREATE VIEW dd_new_articles AS + SELECT R0.article_id AS "articles.R0.article_id" , R0.text AS "articles.R0.text" , R0.dd_count AS "articles.R0.dd_count" , R0.dd_count AS "dd_count" + FROM articles R0 + UNION + SELECT R0.article_id AS "dd_delta_articles.R0.article_id" , R0.text AS "dd_delta_articles.R0.text" , R0.dd_count AS "dd_delta_articles.R0.dd_count" , R0.dd_count AS "dd_count" + FROM dd_delta_articles R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_34 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" @@ -27,105 +93,123 @@ } - deepdive.extraction.extractors.extraction_rule_27 { - sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; - CREATE VIEW dd_delta_ext_has_spouse_features_input AS - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS 
"people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - 
FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, dd_delta_people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM sentences R0, 
dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, dd_delta_has_spouse_candidates R1, dd_delta_people_mentions R2, dd_delta_people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + deepdive.extraction.extractors.extraction_rule_18 { + sql: """ DROP VIEW IF EXISTS dd_new_has_spouse_candidates; + CREATE VIEW dd_new_has_spouse_candidates AS + SELECT R0.person1_id AS "has_spouse_candidates.R0.person1_id" , R0.person2_id AS "has_spouse_candidates.R0.person2_id" , R0.sentence_id AS "has_spouse_candidates.R0.sentence_id" , R0.description AS "has_spouse_candidates.R0.description" , R0.relation_id AS "has_spouse_candidates.R0.relation_id" , R0.is_true AS "has_spouse_candidates.R0.is_true" , R0.dd_count AS "has_spouse_candidates.R0.dd_count" , R0.dd_count AS "dd_count" + FROM has_spouse_candidates R0 + UNION + SELECT R0.person1_id AS "dd_delta_has_spouse_candidates.R0.person1_id" , R0.person2_id AS "dd_delta_has_spouse_candidates.R0.person2_id" , R0.sentence_id AS "dd_delta_has_spouse_candidates.R0.sentence_id" , R0.description AS "dd_delta_has_spouse_candidates.R0.description" , R0.relation_id AS "dd_delta_has_spouse_candidates.R0.relation_id" , R0.is_true AS "dd_delta_has_spouse_candidates.R0.is_true" , R0.dd_count AS "dd_delta_has_spouse_candidates.R0.dd_count" , R0.dd_count AS "dd_count" + FROM dd_delta_has_spouse_candidates R0 + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_36" ] + } + + + deepdive.extraction.extractors.extraction_rule_49 { + sql: """ DROP VIEW IF EXISTS dd_new_ext_has_spouse_features_input; + CREATE VIEW dd_new_ext_has_spouse_features_input AS + SELECT R0.words AS "ext_has_spouse_features_input.R0.words" , R0.relation_id AS "ext_has_spouse_features_input.R0.relation_id" , R0.p1_start_position AS "ext_has_spouse_features_input.R0.p1_start_position" , R0.p1_length AS "ext_has_spouse_features_input.R0.p1_length" , R0.p2_start_position AS "ext_has_spouse_features_input.R0.p2_start_position" , R0.p2_length AS "ext_has_spouse_features_input.R0.p2_length" , R0.dd_count AS "ext_has_spouse_features_input.R0.dd_count" , R0.dd_count AS "dd_count" + FROM ext_has_spouse_features_input R0 + UNION + SELECT R0.words AS "dd_delta_ext_has_spouse_features_input.R0.words" , R0.relation_id AS "dd_delta_ext_has_spouse_features_input.R0.relation_id" , R0.p1_start_position AS "dd_delta_ext_has_spouse_features_input.R0.p1_start_position" , R0.p1_length AS "dd_delta_ext_has_spouse_features_input.R0.p1_length" , R0.p2_start_position AS "dd_delta_ext_has_spouse_features_input.R0.p2_start_position" , R0.p2_length AS "dd_delta_ext_has_spouse_features_input.R0.p2_length" , R0.dd_count AS "dd_delta_ext_has_spouse_features_input.R0.dd_count" , R0.dd_count AS "dd_count" + FROM 
dd_delta_ext_has_spouse_features_input R0 + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_51" ] + } + + + deepdive.extraction.extractors.extraction_rule_40 { + sql: """ DROP VIEW IF EXISTS dd_new_ext_has_spouse_input; + CREATE VIEW dd_new_ext_has_spouse_input AS + SELECT R0.sentence_id AS "ext_has_spouse_input.R0.sentence_id" , R0.p1_id AS "ext_has_spouse_input.R0.p1_id" , R0.p1_text AS "ext_has_spouse_input.R0.p1_text" , R0.p2_id AS "ext_has_spouse_input.R0.p2_id" , R0.p2_text AS "ext_has_spouse_input.R0.p2_text" , R0.dd_count AS "ext_has_spouse_input.R0.dd_count" , R0.dd_count AS "dd_count" + FROM ext_has_spouse_input R0 + UNION + SELECT R0.sentence_id AS "dd_delta_ext_has_spouse_input.R0.sentence_id" , R0.p1_id AS "dd_delta_ext_has_spouse_input.R0.p1_id" , R0.p1_text AS "dd_delta_ext_has_spouse_input.R0.p1_text" , R0.p2_id AS "dd_delta_ext_has_spouse_input.R0.p2_id" , R0.p2_text AS "dd_delta_ext_has_spouse_input.R0.p2_text" , R0.dd_count AS "dd_delta_ext_has_spouse_input.R0.dd_count" , R0.dd_count AS "dd_count" + FROM dd_delta_ext_has_spouse_input R0 + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_42" ] + } + + + deepdive.extraction.extractors.extraction_rule_23 { + sql: """ DROP VIEW IF EXISTS dd_new_has_spouse_features; + CREATE VIEW dd_new_has_spouse_features AS + SELECT R0.relation_id AS "has_spouse_features.R0.relation_id" , R0.feature AS "has_spouse_features.R0.feature" , R0.dd_count AS "has_spouse_features.R0.dd_count" , R0.dd_count AS "dd_count" + FROM has_spouse_features R0 + UNION + SELECT R0.relation_id AS "dd_delta_has_spouse_features.R0.relation_id" , R0.feature AS "dd_delta_has_spouse_features.R0.feature" , R0.dd_count AS "dd_delta_has_spouse_features.R0.dd_count" , R0.dd_count AS "dd_count" + FROM dd_delta_has_spouse_features R0 + """ style: "sql_extractor" - dependencies: [ "extraction_rule_17" , "extraction_rule_12" ] + dependencies: [ "extraction_rule_45" ] + } + + + deepdive.extraction.extractors.extraction_rule_8 { + sql: """ DROP VIEW IF EXISTS dd_new_sentences; + CREATE VIEW dd_new_sentences AS + SELECT R0.document_id AS "sentences.R0.document_id" , R0.sentence AS "sentences.R0.sentence" , R0.words AS "sentences.R0.words" , R0.lemma AS "sentences.R0.lemma" , R0.pos_tags AS "sentences.R0.pos_tags" , R0.dependencies AS "sentences.R0.dependencies" , R0.ner_tags AS "sentences.R0.ner_tags" , R0.sentence_offset AS "sentences.R0.sentence_offset" , R0.sentence_id AS "sentences.R0.sentence_id" , R0.dd_count AS "sentences.R0.dd_count" , R0.dd_count AS "dd_count" + FROM sentences R0 + UNION + SELECT R0.document_id AS "dd_delta_sentences.R0.document_id" , R0.sentence AS "dd_delta_sentences.R0.sentence" , R0.words AS "dd_delta_sentences.R0.words" , R0.lemma AS "dd_delta_sentences.R0.lemma" , R0.pos_tags AS "dd_delta_sentences.R0.pos_tags" , R0.dependencies AS "dd_delta_sentences.R0.dependencies" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.sentence_offset AS "dd_delta_sentences.R0.sentence_offset" , R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.dd_count AS "dd_delta_sentences.R0.dd_count" , R0.dd_count AS "dd_count" + FROM dd_delta_sentences R0 + + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_20 { + deepdive.extraction.extractors.extraction_rule_42 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , 
R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" FROM dd_delta_people_mentions R0, people_mentions R1 WHERE R1.sentence_id = R0.sentence_id UNION - SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" - FROM people_mentions R0, dd_delta_people_mentions R1 - WHERE R1.sentence_id = R0.sentence_id UNION - SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" - FROM dd_delta_people_mentions R0, dd_delta_people_mentions R1 + SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_12" ] + dependencies: [ "extraction_rule_28" , "extraction_rule_13" ] } - deepdive.extraction.extractors.extraction_rule_12 { + deepdive.extraction.extractors.extraction_rule_28 { input: """ SELECT * FROM dd_delta_ext_people_input """ output_relation: "dd_delta_people_mentions" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_15" ] + dependencies: [ "extraction_rule_34" ] } - deepdive.extraction.extractors.extraction_rule_24 { + deepdive.extraction.extractors.extraction_rule_45 { input: """ SELECT * FROM dd_delta_ext_has_spouse_features_input """ output_relation: "dd_delta_has_spouse_features" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_27" ] + dependencies: [ "extraction_rule_51" ] } - deepdive.extraction.extractors.extraction_rule_17 { + deepdive.extraction.extractors.extraction_rule_36 { input: """ SELECT * FROM dd_delta_ext_has_spouse_input """ output_relation: "dd_delta_has_spouse_candidates" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_20" ] + dependencies: [ "extraction_rule_42" ] } @@ -136,21 +220,18 @@ FROM dd_delta_has_spouse_candidates R0, has_spouse_features R1 WHERE R1.relation_id = R0.relation_id UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count - FROM has_spouse_candidates R0, dd_delta_has_spouse_features R1 - WHERE R1.relation_id = R0.relation_id - UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count - FROM dd_delta_has_spouse_candidates R0, dd_delta_has_spouse_features R1 + FROM dd_new_has_spouse_candidates R0, dd_delta_has_spouse_features R1 WHERE R1.relation_id = R0.relation_id """ style: "sql_extractor" - dependencies: [ 
"extraction_rule_17" , "extraction_rule_24" ] + dependencies: [ "extraction_rule_36" , "extraction_rule_18" , "extraction_rule_45" ] } deepdive.inference.factors.factor_dd_delta_has_spouse { input_query: """ - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R0.label AS "dd_delta_has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(dd_delta_has_spouse.R0.label)" @@ -160,18 +241,8 @@ deepdive.inference.factors.factor_dd_delta_has_spouse { input_query: """ - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R0.label AS "dd_delta_has_spouse.R0.label" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, has_spouse_candidates R1, dd_delta_has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Imply(dd_delta_has_spouse.R0.label)" - weight: "?(dd_delta_has_spouse_features.R2.feature)" - } - - - deepdive.inference.factors.factor_dd_delta_has_spouse { - input_query: """ - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R0.label AS "dd_delta_has_spouse.R0.label" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, dd_delta_has_spouse_features R2 + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(dd_delta_has_spouse.R0.label)" weight: "?(dd_delta_has_spouse_features.R2.feature)" diff --git a/examples/spouse_incremental_example.print.expected b/examples/spouse_incremental_example.print.expected index 406f28b4a..93fe809c5 100644 --- a/examples/spouse_incremental_example.print.expected +++ b/examples/spouse_incremental_example.print.expected @@ -6,6 +6,16 @@ dd_delta_articles(article_id text, text text, dd_count int). +dd_new_articles(article_id text, + text text, + dd_count int). + +dd_new_articles(article_id, text, dd_count) :- + articles(article_id, text, dd_count). + +dd_new_articles(article_id, text, dd_count) :- + dd_delta_articles(article_id, text, dd_count). + sentences(document_id text, sentence text, words text[], @@ -28,6 +38,23 @@ dd_delta_sentences(document_id text, sentence_id text, dd_count int). +dd_new_sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text, + dd_count int). + +dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id, dd_count) :- + sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id, dd_count). 
+ +dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id, dd_count) :- + dd_delta_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id, dd_count). + people_mentions(sentence_id text, start_position int, length int, @@ -42,6 +69,19 @@ dd_delta_people_mentions(sentence_id text, mention_id text, dd_count int). +dd_new_people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text, + dd_count int). + +dd_new_people_mentions(sentence_id, start_position, length, text, mention_id, dd_count) :- + people_mentions(sentence_id, start_position, length, text, mention_id, dd_count). + +dd_new_people_mentions(sentence_id, start_position, length, text, mention_id, dd_count) :- + dd_delta_people_mentions(sentence_id, start_position, length, text, mention_id, dd_count). + has_spouse_candidates(person1_id text, person2_id text, sentence_id text, @@ -58,6 +98,20 @@ dd_delta_has_spouse_candidates(person1_id text, is_true boolean, dd_count int). +dd_new_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean, + dd_count int). + +dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true, dd_count) :- + has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true, dd_count). + +dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true, dd_count) :- + dd_delta_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true, dd_count). + has_spouse_features(relation_id text, feature text, dd_count int). @@ -66,10 +120,22 @@ dd_delta_has_spouse_features(relation_id text, feature text, dd_count int). +dd_new_has_spouse_features(relation_id text, + feature text, + dd_count int). + +dd_new_has_spouse_features(relation_id, feature, dd_count) :- + has_spouse_features(relation_id, feature, dd_count). + +dd_new_has_spouse_features(relation_id, feature, dd_count) :- + dd_delta_has_spouse_features(relation_id, feature, dd_count). + has_spouse?(relation_id text). dd_delta_has_spouse?(relation_id text). +dd_new_has_spouse?(relation_id text). + dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). ext_people_input(sentence_id text, @@ -82,6 +148,17 @@ dd_delta_ext_people_input(sentence_id text, ner_tags text[], dd_count int). +dd_new_ext_people_input(sentence_id text, + words text[], + ner_tags text[], + dd_count int). + +dd_new_ext_people_input(sentence_id, words, ner_tags, dd_count) :- + ext_people_input(sentence_id, words, ner_tags, dd_count). + +dd_new_ext_people_input(sentence_id, words, ner_tags, dd_count) :- + dd_delta_ext_people_input(sentence_id, words, ner_tags, dd_count). + dd_delta_ext_people_input(s, words, ner_tags) :- dd_delta_sentences(a, b, words, c, d, e, ner_tags, f, s). @@ -107,16 +184,25 @@ dd_delta_ext_has_spouse_input(sentence_id text, p2_text text, dd_count int). +dd_new_ext_has_spouse_input(sentence_id text, + p1_id text, + p1_text text, + p2_id text, + p2_text text, + dd_count int). + +dd_new_ext_has_spouse_input(sentence_id, p1_id, p1_text, p2_id, p2_text, dd_count) :- + ext_has_spouse_input(sentence_id, p1_id, p1_text, p2_id, p2_text, dd_count). 
+ +dd_new_ext_has_spouse_input(sentence_id, p1_id, p1_text, p2_id, p2_text, dd_count) :- + dd_delta_ext_has_spouse_input(sentence_id, p1_id, p1_text, p2_id, p2_text, dd_count). + dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- dd_delta_people_mentions(s, a, b, p1_text, p1_id), people_mentions(s, c, d, p2_text, p2_id). dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- - people_mentions(s, a, b, p1_text, p1_id), - dd_delta_people_mentions(s, c, d, p2_text, p2_id). - -dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- - dd_delta_people_mentions(s, a, b, p1_text, p1_id), + dd_new_people_mentions(s, a, b, p1_text, p1_id), dd_delta_people_mentions(s, c, d, p2_text, p2_id). function ext_has_spouse @@ -143,94 +229,42 @@ dd_delta_ext_has_spouse_features_input(words text[], p2_length int, dd_count int). -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - dd_delta_sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - people_mentions(s, p1idx, p1len, k, person1_id), - people_mentions(s, p2idx, p2len, l, person2_id). - -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - people_mentions(s, p1idx, p1len, k, person1_id), - people_mentions(s, p2idx, p2len, l, person2_id). +dd_new_ext_has_spouse_features_input(words text[], + relation_id text, + p1_start_position int, + p1_length int, + p2_start_position int, + p2_length int, + dd_count int). -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - dd_delta_sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - people_mentions(s, p1idx, p1len, k, person1_id), - people_mentions(s, p2idx, p2len, l, person2_id). +dd_new_ext_has_spouse_features_input(words, relation_id, p1_start_position, p1_length, p2_start_position, p2_length, dd_count) :- + ext_has_spouse_features_input(words, relation_id, p1_start_position, p1_length, p2_start_position, p2_length, dd_count). -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), - people_mentions(s, p2idx, p2len, l, person2_id). +dd_new_ext_has_spouse_features_input(words, relation_id, p1_start_position, p1_length, p2_start_position, p2_length, dd_count) :- + dd_delta_ext_has_spouse_features_input(words, relation_id, p1_start_position, p1_length, p2_start_position, p2_length, dd_count). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - sentences(a, b, words, c, d, e, f, g, s), + dd_new_sentences(a, b, words, c, d, e, f, g, s), dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). 
dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - dd_delta_sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_new_sentences(a, b, words, c, d, e, f, g, s), + dd_new_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), people_mentions(s, p2idx, p2len, l, person2_id). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - people_mentions(s, p1idx, p1len, k, person1_id), - dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). - -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - dd_delta_sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - people_mentions(s, p1idx, p1len, k, person1_id), - dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). - -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - people_mentions(s, p1idx, p1len, k, person1_id), - dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). - -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - dd_delta_sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - people_mentions(s, p1idx, p1len, k, person1_id), - dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). - -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), - dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). - -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - dd_delta_sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), - dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). - -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), - dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). - -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - dd_delta_sentences(a, b, words, c, d, e, f, g, s), - dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + dd_new_sentences(a, b, words, c, d, e, f, g, s), + dd_new_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_new_people_mentions(s, p1idx, p1len, k, person1_id), dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). function ext_has_spouse_features @@ -245,12 +279,7 @@ dd_delta_has_spouse(rid) :- weight = f label = l. dd_delta_has_spouse(rid) :- - has_spouse_candidates(a, b, c, d, rid, l), - dd_delta_has_spouse_features(rid, f) - weight = f - label = l. 
-dd_delta_has_spouse(rid) :- - dd_delta_has_spouse_candidates(a, b, c, d, rid, l), + dd_new_has_spouse_candidates(a, b, c, d, rid, l), dd_delta_has_spouse_features(rid, f) weight = f label = l. From 6e42fe4c9383c0acbf4fee26f021c80e17dc70fd Mon Sep 17 00:00:00 2001 From: senwu Date: Mon, 18 May 2015 23:41:27 -0700 Subject: [PATCH 075/347] modify spouse example --- examples/spouse_example.compile.expected | 2 +- examples/spouse_example.ddl | 3 ++- examples/spouse_example.print.expected | 3 ++- examples/spouse_incremental_example.ddl | 3 ++- examples/spouse_incremental_example.print.expected | 6 ++++-- 5 files changed, 11 insertions(+), 6 deletions(-) diff --git a/examples/spouse_example.compile.expected b/examples/spouse_example.compile.expected index 10a58e063..2d2b39a35 100644 --- a/examples/spouse_example.compile.expected +++ b/examples/spouse_example.compile.expected @@ -96,7 +96,7 @@ deepdive.inference.factors.factor_has_spouse { input_query: """ - SELECT R0.id AS "has_spouse.R0.id" , R0.label AS "has_spouse.R0.label" , R2.feature AS "has_spouse_features.R2.feature" + SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(has_spouse.R0.label)" diff --git a/examples/spouse_example.ddl b/examples/spouse_example.ddl index ac5546486..94a0266e6 100644 --- a/examples/spouse_example.ddl +++ b/examples/spouse_example.ddl @@ -92,4 +92,5 @@ has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f -label = l. +label = l +rule = imply. diff --git a/examples/spouse_example.print.expected b/examples/spouse_example.print.expected index 4aedd134f..ba4d0d933 100644 --- a/examples/spouse_example.print.expected +++ b/examples/spouse_example.print.expected @@ -87,4 +87,5 @@ has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f - label = l. + label = l + rule = imply. diff --git a/examples/spouse_incremental_example.ddl b/examples/spouse_incremental_example.ddl index cf467ac8c..11503d7d9 100644 --- a/examples/spouse_incremental_example.ddl +++ b/examples/spouse_incremental_example.ddl @@ -100,4 +100,5 @@ has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f -label = l. +label = l +rule = imply. diff --git a/examples/spouse_incremental_example.print.expected b/examples/spouse_incremental_example.print.expected index 93fe809c5..b47c1eb17 100644 --- a/examples/spouse_incremental_example.print.expected +++ b/examples/spouse_incremental_example.print.expected @@ -277,9 +277,11 @@ dd_delta_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f - label = l. + label = l + rule = imply. dd_delta_has_spouse(rid) :- dd_new_has_spouse_candidates(a, b, c, d, rid, l), dd_delta_has_spouse_features(rid, f) weight = f - label = l. + label = l + rule = imply. 
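
Note: the rule = imply clause written into the example DDL above only becomes parseable with the grammar change in the next patch, which extends inferenceRule with a fourth component (def rule = "rule" ~> "=" ~> ruleType). As a minimal, self-contained sketch of how such a trailing clause composes with Scala's standard parser combinators — the object name, the simplified factorWeight, and the main driver below are illustrative assumptions, not code from the patch:

    import scala.util.parsing.combinator.JavaTokenParsers

    // Sketch of the trailing clauses of an inference rule, mirroring the
    // combinators the next patch adds to DeepDiveLogParser. The weight is
    // simplified here to a comma-separated list of identifiers.
    object RuleClauseSketch extends JavaTokenParsers {
      def ruleType     = ident                                // e.g. "imply"
      def factorWeight = "weight" ~> "=" ~> rep1sep(ident, ",")
      def supervision  = "label" ~> "=" ~> ident
      def rule         = "rule" ~> "=" ~> ruleType

      // An inference rule's tail: weight, label, then the rule type.
      def ruleTail = factorWeight ~ supervision ~ rule ^^ {
        case w ~ s ~ r => (w, s, r)
      }

      def main(args: Array[String]): Unit = {
        // Prints a successful ParseResult, e.g. parsed: (List(f),l,imply)
        println(parseAll(ruleTail, "weight = f label = l rule = imply"))
      }
    }

Because rule is not wrapped in opt(...) in the patch, every inference rule must now carry a rule = ... suffix; the pretty-printer hunk below still guards against a null rule, mirroring how supervision is handled.
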
From 6ba3030c8c5f1989a4bfa2bfcd82cc23a27265b1 Mon Sep 17 00:00:00 2001 From: senwu Date: Mon, 18 May 2015 23:42:06 -0700 Subject: [PATCH 076/347] add rule in inference rule to support different rules --- DeepDiveLogCompiler.scala | 4 ++-- DeepDiveLogDeltaDeriver.scala | 2 +- DeepDiveLogParser.scala | 13 ++++++++----- DeepDiveLogPrettyPrinter.scala | 4 +++- 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 13f08c214..31fcc264b 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -99,7 +99,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C ground_relations += { r -> !isQuery } // record whether a query or a ground term. } case ExtractionRule(_) => () - case InferenceRule(_,_,_) => () + case InferenceRule(_,_,_,_) => () case fdecl : FunctionDeclaration => function_schema += {fdecl.functionName -> fdecl} case FunctionCallRule(_,_,_) => () } @@ -446,7 +446,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var schema = Set[String]() // generate the statements. statements.foreach { - case InferenceRule(q, weights, supervision) => + case InferenceRule(q, weights, supervision, rule) => val qs = new QuerySchema(q) schema += s"${q.head.name}.label: Boolean" case _ => () diff --git a/DeepDiveLogDeltaDeriver.scala b/DeepDiveLogDeltaDeriver.scala index bc79f1c11..a80045d47 100644 --- a/DeepDiveLogDeltaDeriver.scala +++ b/DeepDiveLogDeltaDeriver.scala @@ -168,7 +168,7 @@ object DeepDiveLogDeltaDeriver{ else if (j == i) newStmtCqBody += ddDeltaStmtCqBody(j) } - incrementalStatement += InferenceRule(ConjunctiveQuery(newStmtCqHead, newStmtCqBody.toList), stmt.weights, stmt.supervision) + incrementalStatement += InferenceRule(ConjunctiveQuery(newStmtCqHead, newStmtCqBody.toList), stmt.weights, stmt.supervision, stmt.rule) } incrementalStatement.toList } diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala index d48732625..7329f56a2 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -36,7 +36,7 @@ case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statem case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration]) extends Statement case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule -case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String) extends Statement // Weighted rule +case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String, rule : String) extends Statement // Weighted rule // Parser @@ -58,6 +58,7 @@ class DeepDiveLogParser extends JavaTokenParsers { } def variableName = ident def functionName = ident + def ruleType = ident def columnDeclaration: Parser[Column] = columnName ~ columnType ^^ { @@ -144,14 +145,16 @@ class DeepDiveLogParser extends JavaTokenParsers { def supervision = "label" ~> "=" ~> variableName + def rule = "rule" ~> "=" ~> ruleType + def inferenceRule : Parser[InferenceRule] = - ( conjunctiveQuery ~ factorWeight ~ supervision + ( conjunctiveQuery ~ factorWeight ~ supervision ~ rule ) ^^ { - case (q ~ weight ~ supervision) => - InferenceRule(q, weight, supervision) + case (q ~ weight ~ supervision ~ rule) => + InferenceRule(q, weight, supervision, rule) } - // rules or schema 
elements in aribitrary order + // rules or schema elements in arbitrary order def statement : Parser[Statement] = ( schemaDeclaration | inferenceRule | extractionRule diff --git a/DeepDiveLogPrettyPrinter.scala b/DeepDiveLogPrettyPrinter.scala index 1bfe0fe8e..d387d9fc8 100644 --- a/DeepDiveLogPrettyPrinter.scala +++ b/DeepDiveLogPrettyPrinter.scala @@ -74,7 +74,9 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { ( if (stmt.supervision == null) "" else "\n label = " + stmt.supervision ) + - "." + ( if (stmt.rule == null) "" + else "\n rule = " + stmt.rule + ) + "." } override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { From cf493e2993e1574e8223e201c575e37474826cce Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Tue, 19 May 2015 17:40:47 -0700 Subject: [PATCH 077/347] Rebases incremental test cases to the original example --- ...ouse_example.compile-incremental.expected} | 0 ...spouse_example.print-incremental.expected} | 0 examples/spouse_incremental_example.ddl | 103 ------------------ test/per-example.bats | 23 +++- 4 files changed, 20 insertions(+), 106 deletions(-) rename examples/{spouse_incremental_example.compile.expected => spouse_example.compile-incremental.expected} (100%) rename examples/{spouse_incremental_example.print.expected => spouse_example.print-incremental.expected} (100%) delete mode 100644 examples/spouse_incremental_example.ddl diff --git a/examples/spouse_incremental_example.compile.expected b/examples/spouse_example.compile-incremental.expected similarity index 100% rename from examples/spouse_incremental_example.compile.expected rename to examples/spouse_example.compile-incremental.expected diff --git a/examples/spouse_incremental_example.print.expected b/examples/spouse_example.print-incremental.expected similarity index 100% rename from examples/spouse_incremental_example.print.expected rename to examples/spouse_example.print-incremental.expected diff --git a/examples/spouse_incremental_example.ddl b/examples/spouse_incremental_example.ddl deleted file mode 100644 index cf467ac8c..000000000 --- a/examples/spouse_incremental_example.ddl +++ /dev/null @@ -1,103 +0,0 @@ -articles( - article_id text, - text text, - dd_count int). - -sentences( - document_id text, - sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], - sentence_offset int, - sentence_id text, - dd_count int). - -people_mentions( - sentence_id text, - start_position int, - length int, - text text, - mention_id text, - dd_count int). - -has_spouse_candidates( - person1_id text, - person2_id text, - sentence_id text, - description text, - relation_id text, - is_true boolean, - dd_count int). - -has_spouse_features( - relation_id text, - feature text, - dd_count int). - -has_spouse?(relation_id text). - -people_mentions :- - !ext_people(ext_people_input). - -ext_people_input( - sentence_id text, - words text[], - ner_tags text[], - dd_count int). - -ext_people_input(s, words, ner_tags) :- - sentences(a, b, words, c, d, e, ner_tags, f, s). - -function ext_people over like ext_people_input - returns like people_mentions - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. - -has_spouse_candidates :- - !ext_has_spouse(ext_has_spouse_input). - -ext_has_spouse_input( - sentence_id text, - p1_id text, - p1_text text, - p2_id text, - p2_text text, - dd_count int). 
- -ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- - people_mentions(s, a, b, p1_text, p1_id), - people_mentions(s, c, d, p2_text, p2_id). - -function ext_has_spouse over like ext_has_spouse_input - returns like has_spouse_candidates - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. - -has_spouse_features :- - !ext_has_spouse_features(ext_has_spouse_features_input). - -ext_has_spouse_features_input( - words text[], - relation_id text, - p1_start_position int, - p1_length int, - p2_start_position int, - p2_length int, - dd_count int). - -ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - people_mentions(s, p1idx, p1len, k, person1_id), - people_mentions(s, p2idx, p2len, l, person2_id). - -function ext_has_spouse_features over like ext_has_spouse_features_input - returns like has_spouse_features - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. - -has_spouse(rid) :- - has_spouse_candidates(a, b, c, d, rid, l), - has_spouse_features(rid, f) -weight = f -label = l. diff --git a/test/per-example.bats b/test/per-example.bats index b8400dd0b..42d459e05 100755 --- a/test/per-example.bats +++ b/test/per-example.bats @@ -16,6 +16,8 @@ setup() { [ -e "$EXAMPLE" ] } +## tests for basic compilation and pretty-printing + # compare the compiled output with what's expected @test "compile $EXAMPLE_NAME" { expectedOutput=$EXAMPLE_BASEPATH.compile.expected @@ -24,7 +26,7 @@ setup() { diff -u "$expectedOutput" - } -# compare the pretty-printed output with the input +# compare the pretty-printed output with what's expected @test "print $EXAMPLE_NAME as expected" { expectedOutput=$EXAMPLE_BASEPATH.print.expected [ -e "$expectedOutput" ] || skip @@ -41,6 +43,21 @@ setup() { } -# TODO incremental print +## tests for --incremental support + +# compare the compiled output of the incremental version with what's expected +@test "print $EXAMPLE_NAME as expected" { + expectedOutput=$EXAMPLE_BASEPATH.compile-incremental.expected + [ -e "$expectedOutput" ] || skip + scala "$DDLOG_JAR" compile --incremental "$EXAMPLE" | + diff -u "$expectedOutput" - +} + +# compare the pretty-printed output of the incremental version with what's expected +@test "print $EXAMPLE_NAME as expected" { + expectedOutput=$EXAMPLE_BASEPATH.print-incremental.expected + [ -e "$expectedOutput" ] || skip + scala "$DDLOG_JAR" print --incremental "$EXAMPLE" | + diff -u "$expectedOutput" - +} -# TODO incremental compile From bf27fc63b10bf18078423410122ee77e68b1c735 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Tue, 19 May 2015 17:45:22 -0700 Subject: [PATCH 078/347] Reorgs into a standard scala project structure with package name org.deepdive.ddlog --- Makefile | 2 +- .../main/scala/org/deepdive/ddlog/DeepDiveLog.scala | 2 ++ .../main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala | 2 ++ .../main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala | 2 ++ .../main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala | 2 ++ .../scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala | 2 ++ 6 files changed, 11 insertions(+), 1 deletion(-) rename DeepDiveLog.scala => src/main/scala/org/deepdive/ddlog/DeepDiveLog.scala (98%) rename DeepDiveLogCompiler.scala => src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala (99%) rename DeepDiveLogDeltaDeriver.scala => 
src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala (99%) rename DeepDiveLogParser.scala => src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala (99%) rename DeepDiveLogPrettyPrinter.scala => src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala (99%) diff --git a/Makefile b/Makefile index 9f6d4a8f7..32c9c3e24 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,7 @@ test-package: $(JAR) $(MAKE) test TEST_JAR=$< # build test jar -$(TEST_JAR): $(wildcard *.scala) +$(TEST_JAR): $(wildcard src/main/scala/org/deepdive/ddlog/*.scala) sbt package ln -sfn $$(ls -t target/scala-*/*_*.jar | head -1) $@ touch $@ diff --git a/DeepDiveLog.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLog.scala similarity index 98% rename from DeepDiveLog.scala rename to src/main/scala/org/deepdive/ddlog/DeepDiveLog.scala index 859128685..df647b932 100644 --- a/DeepDiveLog.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLog.scala @@ -1,3 +1,5 @@ +package org.deepdive.ddlog + // A command-line interface object DeepDiveLog { type Program = List[Statement] diff --git a/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala similarity index 99% rename from DeepDiveLogCompiler.scala rename to src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 13f08c214..1f4e1e73a 100644 --- a/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -1,3 +1,5 @@ +package org.deepdive.ddlog + // DeepDiveLog compiler // See: https://docs.google.com/document/d/1SBIvvki3mnR28Mf0Pkin9w9mWNam5AA0SpIGj1ZN2c4 diff --git a/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala similarity index 99% rename from DeepDiveLogDeltaDeriver.scala rename to src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index bc79f1c11..8368f0428 100644 --- a/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -1,3 +1,5 @@ +package org.deepdive.ddlog + import scala.collection.mutable.ListBuffer object DeepDiveLogDeltaDeriver{ diff --git a/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala similarity index 99% rename from DeepDiveLogParser.scala rename to src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index d48732625..66a885812 100644 --- a/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -1,3 +1,5 @@ +package org.deepdive.ddlog + // DeepDiveLog syntax // See: https://docs.google.com/document/d/1SBIvvki3mnR28Mf0Pkin9w9mWNam5AA0SpIGj1ZN2c4 diff --git a/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala similarity index 99% rename from DeepDiveLogPrettyPrinter.scala rename to src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index 1bfe0fe8e..f9e6ea214 100644 --- a/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -1,3 +1,5 @@ +package org.deepdive.ddlog + import org.apache.commons.lang3.StringEscapeUtils // Pretty printer that simply prints the parsed input From 607153e661bf487cd66e72aa38e9b5b4dd6c5252 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Tue, 19 May 2015 17:57:10 -0700 Subject: [PATCH 079/347] Rewrites test.sh to generate all .bats first and run them at once --- .gitignore | 1 + ... 
=> expected-output-test.bats.per-example} | 0 test/test.sh | 32 +++++++++---------- 3 files changed, 17 insertions(+), 16 deletions(-) rename test/{per-example.bats => expected-output-test.bats.per-example} (100%) mode change 100755 => 100644 diff --git a/.gitignore b/.gitignore index 8ec9fb651..67ba8778f 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ /ddlog-test.jar.classpath target /test/bats +/test/*.for-example-*.bats diff --git a/test/per-example.bats b/test/expected-output-test.bats.per-example old mode 100755 new mode 100644 similarity index 100% rename from test/per-example.bats rename to test/expected-output-test.bats.per-example diff --git a/test/test.sh b/test/test.sh index 722bed8da..6fe8347cc 100755 --- a/test/test.sh +++ b/test/test.sh @@ -7,20 +7,20 @@ PATH="$PWD/bats/bin:$PATH" type bats &>/dev/null || git clone https://github.com/sstephenson/bats.git -# run all .bats tests -c=0 -for t in *.bats; do - case $t in - *-example.bats) - # run bats test for every example - for ddl in ../examples/*.ddl; do - EXAMPLE=$ddl bats $t || c=$? - done - ;; - - *) - # otherwise, simply run the bats - bats $t - esac +# generate bats tests for every per-example templates +for t in *.bats.per-example; do + testName=${t%.bats.per-example} + # generate one for each example + for ddl in ../examples/*.ddl; do + exampleName=${ddl%.ddl} + exampleName=${exampleName#../examples/} + batsFile="$testName".for-example-"$exampleName".bats + { + printf "EXAMPLE=%q\n" "$ddl" + cat $t + } >$batsFile + done done -exit $c + +# run all .bats tests +bats *.bats From 4770a263859da0d6782d3ab40492a5d3f8bfe0c3 Mon Sep 17 00:00:00 2001 From: senwu Date: Tue, 19 May 2015 21:34:25 -0700 Subject: [PATCH 080/347] modify spouse example --- examples/spouse_example.compile.expected | 22 +-- ...pouse_incremental_example.compile.expected | 169 +++++++++--------- .../spouse_incremental_example.print.expected | 54 ++---- examples/test.ddl | 8 + examples/test6.ddl | 5 + examples/test7.ddl | 6 + examples/test8.ddl | 6 + examples/test9.ddl | 9 + 8 files changed, 139 insertions(+), 140 deletions(-) create mode 100644 examples/test.ddl create mode 100644 examples/test6.ddl create mode 100644 examples/test7.ddl create mode 100644 examples/test8.ddl create mode 100644 examples/test9.ddl diff --git a/examples/spouse_example.compile.expected b/examples/spouse_example.compile.expected index 2d2b39a35..2dfb73e7c 100644 --- a/examples/spouse_example.compile.expected +++ b/examples/spouse_example.compile.expected @@ -18,8 +18,8 @@ deepdive.extraction.extractors.extraction_rule_8 { sql: """ DROP VIEW IF EXISTS ext_people_input; CREATE VIEW ext_people_input AS - SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" - FROM sentences R0 + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" + FROM sentences R0 """ style: "sql_extractor" @@ -30,8 +30,8 @@ deepdive.extraction.extractors.extraction_rule_16 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; CREATE VIEW ext_has_spouse_features_input AS - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" - FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions 
R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" @@ -42,8 +42,8 @@ deepdive.extraction.extractors.extraction_rule_12 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_input; CREATE VIEW ext_has_spouse_input AS - SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" - FROM people_mentions R0, people_mentions R1 + SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" + FROM people_mentions R0, people_mentions R1 WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" @@ -85,19 +85,19 @@ sql: """ DROP TABLE IF EXISTS has_spouse; CREATE TABLE has_spouse AS SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label - FROM has_spouse_candidates R0, has_spouse_features R1 + FROM has_spouse_candidates R0, has_spouse_features R1 WHERE R1.relation_id = R0.relation_id - + """ style: "sql_extractor" dependencies: [ "extraction_rule_10" , "extraction_rule_14" ] } - deepdive.inference.factors.factor_has_spouse { + deepdive.inference.factors.factor_has_spouse_0 { input_query: """ - SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" - FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 + SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" + FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" diff --git a/examples/spouse_incremental_example.compile.expected b/examples/spouse_incremental_example.compile.expected index d376e5097..415587dc8 100644 --- a/examples/spouse_incremental_example.compile.expected +++ b/examples/spouse_incremental_example.compile.expected @@ -15,65 +15,65 @@ } - deepdive.extraction.extractors.extraction_rule_51 { + deepdive.extraction.extractors.extraction_rule_42 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" 
, R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS 
"dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_28" , "extraction_rule_8" , "extraction_rule_36" , "extraction_rule_13" , "extraction_rule_18" ] + dependencies: [ "extraction_rule_23" , "extraction_rule_15" , "extraction_rule_7" , "extraction_rule_11" , "extraction_rule_30" ] } - deepdive.extraction.extractors.extraction_rule_13 { + deepdive.extraction.extractors.extraction_rule_11 { sql: """ DROP VIEW IF EXISTS dd_new_people_mentions; CREATE VIEW dd_new_people_mentions AS - SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.start_position AS "people_mentions.R0.start_position" , R0.length AS "people_mentions.R0.length" , R0.text AS "people_mentions.R0.text" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.dd_count AS "people_mentions.R0.dd_count" , R0.dd_count AS "dd_count" - FROM people_mentions R0 + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + FROM people_mentions R0 UNION - SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.start_position AS "dd_delta_people_mentions.R0.start_position" , R0.length AS "dd_delta_people_mentions.R0.length" , R0.text AS "dd_delta_people_mentions.R0.text" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.dd_count AS "dd_delta_people_mentions.R0.dd_count" , R0.dd_count AS "dd_count" - FROM dd_delta_people_mentions R0 + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + FROM dd_delta_people_mentions R0 """ style: "sql_extractor" - dependencies: [ "extraction_rule_28" ] + dependencies: [ "extraction_rule_23" ] } - deepdive.extraction.extractors.extraction_rule_32 { + deepdive.extraction.extractors.extraction_rule_27 { sql: """ DROP VIEW IF EXISTS dd_new_ext_people_input; CREATE VIEW dd_new_ext_people_input AS - SELECT R0.sentence_id AS "ext_people_input.R0.sentence_id" , R0.words AS "ext_people_input.R0.words" , R0.ner_tags AS "ext_people_input.R0.ner_tags" , R0.dd_count AS "ext_people_input.R0.dd_count" , R0.dd_count AS "dd_count" - FROM ext_people_input R0 + SELECT R0.sentence_id, R0.words, R0.ner_tags, R0.dd_count + FROM ext_people_input R0 UNION - SELECT R0.sentence_id AS "dd_delta_ext_people_input.R0.sentence_id" , R0.words AS "dd_delta_ext_people_input.R0.words" , R0.ner_tags AS "dd_delta_ext_people_input.R0.ner_tags" , R0.dd_count AS "dd_delta_ext_people_input.R0.dd_count" , R0.dd_count AS "dd_count" - FROM dd_delta_ext_people_input R0 + SELECT R0.sentence_id, R0.words, R0.ner_tags, R0.dd_count + FROM dd_delta_ext_people_input R0 """ style: "sql_extractor" - dependencies: [ "extraction_rule_34" ] + dependencies: [ "extraction_rule_28" ] } deepdive.extraction.extractors.extraction_rule_3 { sql: """ DROP VIEW IF EXISTS dd_new_articles; CREATE VIEW dd_new_articles AS - SELECT R0.article_id AS "articles.R0.article_id" , R0.text AS "articles.R0.text" , R0.dd_count AS "articles.R0.dd_count" , R0.dd_count AS "dd_count" - FROM articles R0 + 
SELECT R0.article_id, R0.text, R0.dd_count + FROM articles R0 UNION - SELECT R0.article_id AS "dd_delta_articles.R0.article_id" , R0.text AS "dd_delta_articles.R0.text" , R0.dd_count AS "dd_delta_articles.R0.dd_count" , R0.dd_count AS "dd_count" - FROM dd_delta_articles R0 + SELECT R0.article_id, R0.text, R0.dd_count + FROM dd_delta_articles R0 """ style: "sql_extractor" @@ -81,11 +81,11 @@ } - deepdive.extraction.extractors.extraction_rule_34 { + deepdive.extraction.extractors.extraction_rule_28 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS - SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" - FROM dd_delta_sentences R0 + SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" + FROM dd_delta_sentences R0 """ style: "sql_extractor" @@ -93,74 +93,74 @@ } - deepdive.extraction.extractors.extraction_rule_18 { + deepdive.extraction.extractors.extraction_rule_15 { sql: """ DROP VIEW IF EXISTS dd_new_has_spouse_candidates; CREATE VIEW dd_new_has_spouse_candidates AS - SELECT R0.person1_id AS "has_spouse_candidates.R0.person1_id" , R0.person2_id AS "has_spouse_candidates.R0.person2_id" , R0.sentence_id AS "has_spouse_candidates.R0.sentence_id" , R0.description AS "has_spouse_candidates.R0.description" , R0.relation_id AS "has_spouse_candidates.R0.relation_id" , R0.is_true AS "has_spouse_candidates.R0.is_true" , R0.dd_count AS "has_spouse_candidates.R0.dd_count" , R0.dd_count AS "dd_count" - FROM has_spouse_candidates R0 + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + FROM has_spouse_candidates R0 UNION - SELECT R0.person1_id AS "dd_delta_has_spouse_candidates.R0.person1_id" , R0.person2_id AS "dd_delta_has_spouse_candidates.R0.person2_id" , R0.sentence_id AS "dd_delta_has_spouse_candidates.R0.sentence_id" , R0.description AS "dd_delta_has_spouse_candidates.R0.description" , R0.relation_id AS "dd_delta_has_spouse_candidates.R0.relation_id" , R0.is_true AS "dd_delta_has_spouse_candidates.R0.is_true" , R0.dd_count AS "dd_delta_has_spouse_candidates.R0.dd_count" , R0.dd_count AS "dd_count" - FROM dd_delta_has_spouse_candidates R0 + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + FROM dd_delta_has_spouse_candidates R0 """ style: "sql_extractor" - dependencies: [ "extraction_rule_36" ] + dependencies: [ "extraction_rule_30" ] } - deepdive.extraction.extractors.extraction_rule_49 { + deepdive.extraction.extractors.extraction_rule_41 { sql: """ DROP VIEW IF EXISTS dd_new_ext_has_spouse_features_input; CREATE VIEW dd_new_ext_has_spouse_features_input AS - SELECT R0.words AS "ext_has_spouse_features_input.R0.words" , R0.relation_id AS "ext_has_spouse_features_input.R0.relation_id" , R0.p1_start_position AS "ext_has_spouse_features_input.R0.p1_start_position" , R0.p1_length AS "ext_has_spouse_features_input.R0.p1_length" , R0.p2_start_position AS "ext_has_spouse_features_input.R0.p2_start_position" , R0.p2_length AS "ext_has_spouse_features_input.R0.p2_length" , R0.dd_count AS "ext_has_spouse_features_input.R0.dd_count" , R0.dd_count AS "dd_count" - FROM ext_has_spouse_features_input R0 + SELECT R0.words, R0.relation_id, R0.p1_start_position, R0.p1_length, 
R0.p2_start_position, R0.p2_length, R0.dd_count + FROM ext_has_spouse_features_input R0 UNION - SELECT R0.words AS "dd_delta_ext_has_spouse_features_input.R0.words" , R0.relation_id AS "dd_delta_ext_has_spouse_features_input.R0.relation_id" , R0.p1_start_position AS "dd_delta_ext_has_spouse_features_input.R0.p1_start_position" , R0.p1_length AS "dd_delta_ext_has_spouse_features_input.R0.p1_length" , R0.p2_start_position AS "dd_delta_ext_has_spouse_features_input.R0.p2_start_position" , R0.p2_length AS "dd_delta_ext_has_spouse_features_input.R0.p2_length" , R0.dd_count AS "dd_delta_ext_has_spouse_features_input.R0.dd_count" , R0.dd_count AS "dd_count" - FROM dd_delta_ext_has_spouse_features_input R0 + SELECT R0.words, R0.relation_id, R0.p1_start_position, R0.p1_length, R0.p2_start_position, R0.p2_length, R0.dd_count + FROM dd_delta_ext_has_spouse_features_input R0 """ style: "sql_extractor" - dependencies: [ "extraction_rule_51" ] + dependencies: [ "extraction_rule_42" ] } - deepdive.extraction.extractors.extraction_rule_40 { + deepdive.extraction.extractors.extraction_rule_34 { sql: """ DROP VIEW IF EXISTS dd_new_ext_has_spouse_input; CREATE VIEW dd_new_ext_has_spouse_input AS - SELECT R0.sentence_id AS "ext_has_spouse_input.R0.sentence_id" , R0.p1_id AS "ext_has_spouse_input.R0.p1_id" , R0.p1_text AS "ext_has_spouse_input.R0.p1_text" , R0.p2_id AS "ext_has_spouse_input.R0.p2_id" , R0.p2_text AS "ext_has_spouse_input.R0.p2_text" , R0.dd_count AS "ext_has_spouse_input.R0.dd_count" , R0.dd_count AS "dd_count" - FROM ext_has_spouse_input R0 + SELECT R0.sentence_id, R0.p1_id, R0.p1_text, R0.p2_id, R0.p2_text, R0.dd_count + FROM ext_has_spouse_input R0 UNION - SELECT R0.sentence_id AS "dd_delta_ext_has_spouse_input.R0.sentence_id" , R0.p1_id AS "dd_delta_ext_has_spouse_input.R0.p1_id" , R0.p1_text AS "dd_delta_ext_has_spouse_input.R0.p1_text" , R0.p2_id AS "dd_delta_ext_has_spouse_input.R0.p2_id" , R0.p2_text AS "dd_delta_ext_has_spouse_input.R0.p2_text" , R0.dd_count AS "dd_delta_ext_has_spouse_input.R0.dd_count" , R0.dd_count AS "dd_count" - FROM dd_delta_ext_has_spouse_input R0 + SELECT R0.sentence_id, R0.p1_id, R0.p1_text, R0.p2_id, R0.p2_text, R0.dd_count + FROM dd_delta_ext_has_spouse_input R0 """ style: "sql_extractor" - dependencies: [ "extraction_rule_42" ] + dependencies: [ "extraction_rule_35" ] } - deepdive.extraction.extractors.extraction_rule_23 { + deepdive.extraction.extractors.extraction_rule_19 { sql: """ DROP VIEW IF EXISTS dd_new_has_spouse_features; CREATE VIEW dd_new_has_spouse_features AS - SELECT R0.relation_id AS "has_spouse_features.R0.relation_id" , R0.feature AS "has_spouse_features.R0.feature" , R0.dd_count AS "has_spouse_features.R0.dd_count" , R0.dd_count AS "dd_count" - FROM has_spouse_features R0 + SELECT R0.relation_id, R0.feature, R0.dd_count + FROM has_spouse_features R0 UNION - SELECT R0.relation_id AS "dd_delta_has_spouse_features.R0.relation_id" , R0.feature AS "dd_delta_has_spouse_features.R0.feature" , R0.dd_count AS "dd_delta_has_spouse_features.R0.dd_count" , R0.dd_count AS "dd_count" - FROM dd_delta_has_spouse_features R0 + SELECT R0.relation_id, R0.feature, R0.dd_count + FROM dd_delta_has_spouse_features R0 """ style: "sql_extractor" - dependencies: [ "extraction_rule_45" ] + dependencies: [ "extraction_rule_37" ] } - deepdive.extraction.extractors.extraction_rule_8 { + deepdive.extraction.extractors.extraction_rule_7 { sql: """ DROP VIEW IF EXISTS dd_new_sentences; CREATE VIEW dd_new_sentences AS - SELECT R0.document_id AS "sentences.R0.document_id" 
, R0.sentence AS "sentences.R0.sentence" , R0.words AS "sentences.R0.words" , R0.lemma AS "sentences.R0.lemma" , R0.pos_tags AS "sentences.R0.pos_tags" , R0.dependencies AS "sentences.R0.dependencies" , R0.ner_tags AS "sentences.R0.ner_tags" , R0.sentence_offset AS "sentences.R0.sentence_offset" , R0.sentence_id AS "sentences.R0.sentence_id" , R0.dd_count AS "sentences.R0.dd_count" , R0.dd_count AS "dd_count" - FROM sentences R0 + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + FROM sentences R0 UNION - SELECT R0.document_id AS "dd_delta_sentences.R0.document_id" , R0.sentence AS "dd_delta_sentences.R0.sentence" , R0.words AS "dd_delta_sentences.R0.words" , R0.lemma AS "dd_delta_sentences.R0.lemma" , R0.pos_tags AS "dd_delta_sentences.R0.pos_tags" , R0.dependencies AS "dd_delta_sentences.R0.dependencies" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.sentence_offset AS "dd_delta_sentences.R0.sentence_offset" , R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.dd_count AS "dd_delta_sentences.R0.dd_count" , R0.dd_count AS "dd_count" - FROM dd_delta_sentences R0 + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + FROM dd_delta_sentences R0 """ style: "sql_extractor" @@ -168,48 +168,48 @@ } - deepdive.extraction.extractors.extraction_rule_42 { + deepdive.extraction.extractors.extraction_rule_35 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS - SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" - FROM dd_delta_people_mentions R0, people_mentions R1 + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM dd_delta_people_mentions R0, people_mentions R1 WHERE R1.sentence_id = R0.sentence_id UNION - SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" - FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 + SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_28" , "extraction_rule_13" ] + dependencies: [ "extraction_rule_23" , "extraction_rule_11" ] } - deepdive.extraction.extractors.extraction_rule_28 { + deepdive.extraction.extractors.extraction_rule_23 { input: """ SELECT * FROM dd_delta_ext_people_input """ output_relation: 
"dd_delta_people_mentions" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_34" ] + dependencies: [ "extraction_rule_28" ] } - deepdive.extraction.extractors.extraction_rule_45 { + deepdive.extraction.extractors.extraction_rule_37 { input: """ SELECT * FROM dd_delta_ext_has_spouse_features_input """ output_relation: "dd_delta_has_spouse_features" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_51" ] + dependencies: [ "extraction_rule_42" ] } - deepdive.extraction.extractors.extraction_rule_36 { + deepdive.extraction.extractors.extraction_rule_30 { input: """ SELECT * FROM dd_delta_ext_has_spouse_input """ output_relation: "dd_delta_has_spouse_candidates" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_42" ] + dependencies: [ "extraction_rule_35" ] } @@ -217,34 +217,27 @@ sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse; CREATE TABLE dd_delta_has_spouse AS SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count - FROM dd_delta_has_spouse_candidates R0, has_spouse_features R1 + FROM dd_delta_has_spouse_candidates R0, has_spouse_features R1 WHERE R1.relation_id = R0.relation_id - UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count - FROM dd_new_has_spouse_candidates R0, dd_delta_has_spouse_features R1 + UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count + FROM dd_new_has_spouse_candidates R0, dd_delta_has_spouse_features R1 WHERE R1.relation_id = R0.relation_id - + """ style: "sql_extractor" - dependencies: [ "extraction_rule_36" , "extraction_rule_18" , "extraction_rule_45" ] + dependencies: [ "extraction_rule_30" , "extraction_rule_15" , "extraction_rule_37" ] } - deepdive.inference.factors.factor_dd_delta_has_spouse { + deepdive.inference.factors.factor_dd_delta_has_spouse_0 { input_query: """ - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(dd_delta_has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } - - deepdive.inference.factors.factor_dd_delta_has_spouse { - input_query: """ - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND 
R2.relation_id = R0.relation_id """ - function: "Imply(dd_delta_has_spouse.R0.label)" - weight: "?(dd_delta_has_spouse_features.R2.feature)" - } - diff --git a/examples/spouse_incremental_example.print.expected b/examples/spouse_incremental_example.print.expected index b47c1eb17..0b2a5e9e2 100644 --- a/examples/spouse_incremental_example.print.expected +++ b/examples/spouse_incremental_example.print.expected @@ -11,9 +11,7 @@ dd_new_articles(article_id text, dd_count int). dd_new_articles(article_id, text, dd_count) :- - articles(article_id, text, dd_count). - -dd_new_articles(article_id, text, dd_count) :- + articles(article_id, text, dd_count); dd_delta_articles(article_id, text, dd_count). sentences(document_id text, @@ -50,9 +48,7 @@ dd_new_sentences(document_id text, dd_count int). dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id, dd_count) :- - sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id, dd_count). - -dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id, dd_count) :- + sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id, dd_count); dd_delta_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id, dd_count). people_mentions(sentence_id text, @@ -77,9 +73,7 @@ dd_new_people_mentions(sentence_id text, dd_count int). dd_new_people_mentions(sentence_id, start_position, length, text, mention_id, dd_count) :- - people_mentions(sentence_id, start_position, length, text, mention_id, dd_count). - -dd_new_people_mentions(sentence_id, start_position, length, text, mention_id, dd_count) :- + people_mentions(sentence_id, start_position, length, text, mention_id, dd_count); dd_delta_people_mentions(sentence_id, start_position, length, text, mention_id, dd_count). has_spouse_candidates(person1_id text, @@ -107,9 +101,7 @@ dd_new_has_spouse_candidates(person1_id text, dd_count int). dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true, dd_count) :- - has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true, dd_count). - -dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true, dd_count) :- + has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true, dd_count); dd_delta_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true, dd_count). has_spouse_features(relation_id text, @@ -125,9 +117,7 @@ dd_new_has_spouse_features(relation_id text, dd_count int). dd_new_has_spouse_features(relation_id, feature, dd_count) :- - has_spouse_features(relation_id, feature, dd_count). - -dd_new_has_spouse_features(relation_id, feature, dd_count) :- + has_spouse_features(relation_id, feature, dd_count); dd_delta_has_spouse_features(relation_id, feature, dd_count). has_spouse?(relation_id text). @@ -154,9 +144,7 @@ dd_new_ext_people_input(sentence_id text, dd_count int). dd_new_ext_people_input(sentence_id, words, ner_tags, dd_count) :- - ext_people_input(sentence_id, words, ner_tags, dd_count). - -dd_new_ext_people_input(sentence_id, words, ner_tags, dd_count) :- + ext_people_input(sentence_id, words, ner_tags, dd_count); dd_delta_ext_people_input(sentence_id, words, ner_tags, dd_count). 
dd_delta_ext_people_input(s, words, ner_tags) :- @@ -192,16 +180,12 @@ dd_new_ext_has_spouse_input(sentence_id text, dd_count int). dd_new_ext_has_spouse_input(sentence_id, p1_id, p1_text, p2_id, p2_text, dd_count) :- - ext_has_spouse_input(sentence_id, p1_id, p1_text, p2_id, p2_text, dd_count). - -dd_new_ext_has_spouse_input(sentence_id, p1_id, p1_text, p2_id, p2_text, dd_count) :- + ext_has_spouse_input(sentence_id, p1_id, p1_text, p2_id, p2_text, dd_count); dd_delta_ext_has_spouse_input(sentence_id, p1_id, p1_text, p2_id, p2_text, dd_count). dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- dd_delta_people_mentions(s, a, b, p1_text, p1_id), - people_mentions(s, c, d, p2_text, p2_id). - -dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + people_mentions(s, c, d, p2_text, p2_id); dd_new_people_mentions(s, a, b, p1_text, p1_id), dd_delta_people_mentions(s, c, d, p2_text, p2_id). @@ -238,30 +222,22 @@ dd_new_ext_has_spouse_features_input(words text[], dd_count int). dd_new_ext_has_spouse_features_input(words, relation_id, p1_start_position, p1_length, p2_start_position, p2_length, dd_count) :- - ext_has_spouse_features_input(words, relation_id, p1_start_position, p1_length, p2_start_position, p2_length, dd_count). - -dd_new_ext_has_spouse_features_input(words, relation_id, p1_start_position, p1_length, p2_start_position, p2_length, dd_count) :- + ext_has_spouse_features_input(words, relation_id, p1_start_position, p1_length, p2_start_position, p2_length, dd_count); dd_delta_ext_has_spouse_features_input(words, relation_id, p1_start_position, p1_length, p2_start_position, p2_length, dd_count). dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), - people_mentions(s, p2idx, p2len, l, person2_id). - -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + people_mentions(s, p2idx, p2len, l, person2_id); dd_new_sentences(a, b, words, c, d, e, f, g, s), dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), - people_mentions(s, p2idx, p2len, l, person2_id). - -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + people_mentions(s, p2idx, p2len, l, person2_id); dd_new_sentences(a, b, words, c, d, e, f, g, s), dd_new_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), - people_mentions(s, p2idx, p2len, l, person2_id). - -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + people_mentions(s, p2idx, p2len, l, person2_id); dd_new_sentences(a, b, words, c, d, e, f, g, s), dd_new_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), dd_new_people_mentions(s, p1idx, p1len, k, person1_id), @@ -275,11 +251,7 @@ function ext_has_spouse_features dd_delta_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l), - has_spouse_features(rid, f) - weight = f - label = l - rule = imply. -dd_delta_has_spouse(rid) :- + has_spouse_features(rid, f); dd_new_has_spouse_candidates(a, b, c, d, rid, l), dd_delta_has_spouse_features(rid, f) weight = f diff --git a/examples/test.ddl b/examples/test.ddl new file mode 100644 index 000000000..bf6b427cf --- /dev/null +++ b/examples/test.ddl @@ -0,0 +1,8 @@ +A(a1 int, + a2 int). +B(a1 int, + a2 int). 
+C(a1 int, + a2 int, + a3 int). +Q(a1) :- A(a1, x); B(y, a1); C(a, b, a1). diff --git a/examples/test6.ddl b/examples/test6.ddl new file mode 100644 index 000000000..35820ed03 --- /dev/null +++ b/examples/test6.ddl @@ -0,0 +1,5 @@ +R(a int, b int). +S(a int, b int). +Q(x int). + +Q(y) :- R(x, y); R(x, y), S(y, z). \ No newline at end of file diff --git a/examples/test7.ddl b/examples/test7.ddl new file mode 100644 index 000000000..6c1c7f0fe --- /dev/null +++ b/examples/test7.ddl @@ -0,0 +1,6 @@ +R(a int, b int). +S(a int, b int). +T(a int, b int). +Q(x int). + +Q(y) :- R(x, y); R(x, y), S(y, z); S(y, x), T(x, z). \ No newline at end of file diff --git a/examples/test8.ddl b/examples/test8.ddl new file mode 100644 index 000000000..ecf3c4b7e --- /dev/null +++ b/examples/test8.ddl @@ -0,0 +1,6 @@ +R(a int, b int). +S(a int, b int, c int). +T(a int, b int, c int). +Q(x int, z int). + +Q(x, y) :- R(x, y); R(x, y), S(y, z, w); S(y, x, w), T(x, z, w). \ No newline at end of file diff --git a/examples/test9.ddl b/examples/test9.ddl new file mode 100644 index 000000000..e93f7ad0f --- /dev/null +++ b/examples/test9.ddl @@ -0,0 +1,9 @@ +R(a int, b int, c int). +S(a int, b int, c int). +T(a int, b int, c int). +Q?(x int). + +Q(x) :- R(x, y, z); R(x, y, z), S(y, z, w); S(y, x, w), T(x, z, w) +weight = y +label = z +rule = imply. \ No newline at end of file From cc7e3cdea66fe3aff178cf9fd36527b4ebdeb273 Mon Sep 17 00:00:00 2001 From: senwu Date: Tue, 19 May 2015 21:38:55 -0700 Subject: [PATCH 081/347] support disjunction --- DeepDiveLogCompiler.scala | 170 ++++++++++++++++++--------------- DeepDiveLogDeltaDeriver.scala | 132 +++++++++++++------------ DeepDiveLogParser.scala | 9 +- DeepDiveLogPrettyPrinter.scala | 5 +- 4 files changed, 176 insertions(+), 140 deletions(-) diff --git a/DeepDiveLogCompiler.scala b/DeepDiveLogCompiler.scala index 31fcc264b..6e1351322 100644 --- a/DeepDiveLogCompiler.scala +++ b/DeepDiveLogCompiler.scala @@ -146,25 +146,30 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } // Resolve a column name with alias - def resolveColumn(s: String, qs: QuerySchema, q : ConjunctiveQuery, alias: Boolean) : Option[String] = { + def resolveColumn(s: String, qs: QuerySchema, q : ConjunctiveQuery, alias: Int) : Option[String] = { val index = qs.getBodyIndex(s) val name = resolveName(qs.getVar(s)) - val relation = q.body(index).name - if (alias) - Some(s"""R${index}.${name} AS "${relation}.R${index}.${name}" """) - else - Some(s"${relation}.R${index}.${name}") + val relation = q.bodies(0)(index).name + // Three kinds of column expressions: + // 0 => use alias for unknown weight in inference + // 1 => use column name and alias for normal extraction rule + // 2 => use column name for dd_new_ tables + alias match { + case 0 => Some(s"${relation}.R${index}.${name}") + case 1 => Some(s"""R${index}.${name} AS "${relation}.R${index}.${name}" """) + case 2 => Some(s"R${index}.${name}") + } } // This is generic code that generates the FROM with positional aliasing R0, R1, etc. // and the corresponding WHERE clause (equating all variables) def generateSQLBody(z : ConjunctiveQuery) : String = { - val bodyNames = ( z.body.zipWithIndex map { case(x,i) => s"${x.name} R${i}"}).mkString(", ") + val bodyNames = ( z.bodies(0).zipWithIndex map { case(x,i) => s"${x.name} R${i}"}).mkString(", ") // Simple logic for the where clause, first find every first occurence of a // and stick it in a map. 
val qs = new QuerySchema(z) - val whereClause = z.body.zipWithIndex flatMap { + val whereClause = z.bodies(0).zipWithIndex flatMap { case (Atom(relName, terms),body_index) => terms flatMap { case Variable(varName, relName, index) => @@ -219,8 +224,8 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // Look at the body of each statement to construct a dependency graph statements foreach { case f : FunctionCallRule => dependencies += { f -> (( Some(f.input) flatMap (stmtByHeadName get _)).toSet.flatten.flatten) } - case e : ExtractionRule => dependencies += { e -> ((e.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet.flatten.flatten) } - case w : InferenceRule => dependencies += { w -> ((w.q.body map (_.name) flatMap (stmtByHeadName get _)).toSet.flatten.flatten) } + case e : ExtractionRule => dependencies += { e -> ((e.q.bodies.flatten map (_.name) flatMap (stmtByHeadName get _)).toSet.flatten.flatten) } + case w : InferenceRule => dependencies += { w -> ((w.q.bodies.flatten map (_.name) flatMap (stmtByHeadName get _)).toSet.flatten.flatten) } case _ => } } @@ -248,7 +253,7 @@ class QuerySchema(q : ConjunctiveQuery) { // index is the index of the subgoal/atom this variable is found in the body. // variable is the complete Variable type for the found variable. def generateCanonicalVar() = { - q.body.zipWithIndex.foreach { + q.bodies(0).zipWithIndex.foreach { case (Atom(relName,terms),index) => { terms.foreach { case Variable(v, r, i) => @@ -275,21 +280,27 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { def compileExtractionRules(stmts: List[ExtractionRule], ss: CompilationState): CompiledBlocks = { var inputQueries = new ListBuffer[String]() for (stmt <- stmts) { - // Generate the body of the query. - val qs = new QuerySchema( stmt.q ) - // variable columns - val variableCols = stmt.q.head.terms flatMap { - case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, stmt.q, true) - } - - val selectStr = variableCols.mkString(", ") - val ddCount = if (ss.isIncremental) ( stmt.q.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" + for (cqBody <- stmt.q.bodies) { + var tmpStmt = ConjunctiveQuery(stmt.q.head, List(cqBody)) + // Generate the body of the query. 
+ val qs = new QuerySchema( tmpStmt ) + // variable columns + var resolveColumnFlag = tmpStmt.head.name.startsWith("dd_new_") match { + case true => 2 + case false => 1 + } + val variableCols = tmpStmt.head.terms flatMap { + case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, tmpStmt, resolveColumnFlag) + } - inputQueries += s""" - SELECT ${selectStr}${ddCountStr} - ${ ss.generateSQLBody(stmt.q) }""" + val selectStr = variableCols.mkString(", ") + val ddCount = if (ss.isIncremental && (resolveColumnFlag == 1)) ( tmpStmt.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" + inputQueries += s""" + SELECT ${selectStr}${ddCountStr} + ${ ss.generateSQLBody(tmpStmt) }""" + } } val blockName = ss.resolveExtractorBlockName(stmts(0)) val extractor = s""" @@ -340,25 +351,28 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { // generate inference rule part for deepdive def compileInferenceRules(stmts: List[InferenceRule], ss: CompilationState): CompiledBlocks = { var blocks = List[String]() - val qs = new QuerySchema( stmts(0).q ) // node query // generate the node portion (V) of the factor graph - def compileNodeRule(zs: List[InferenceRule], qs: QuerySchema, ss: CompilationState) : CompiledBlocks = { + def compileNodeRule(zs: List[InferenceRule], ss: CompilationState) : CompiledBlocks = { var inputQueries = new ListBuffer[String]() for (z <- zs) { - val headTerms = z.q.head.terms map { - case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" + for (cqBody <- z.q.bodies) { + var tmpStmt = ConjunctiveQuery(z.q.head, List(cqBody)) + val qs = new QuerySchema(tmpStmt) + val headTerms = tmpStmt.head.terms map { + case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" + } + val index = qs.getBodyIndex(z.supervision) + val name = ss.resolveName(qs.getVar(z.supervision)) + val labelCol = s"R${index}.${name}" + val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") + val ddCount = if (ss.isIncremental) ( tmpStmt.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else "" + + inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr} + ${ ss.generateSQLBody(tmpStmt) } + """ } - val index = qs.getBodyIndex(z.supervision) - val name = ss.resolveName(qs.getVar(z.supervision)) - val labelCol = s"R${index}.${name}" - val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") - val ddCount = if (ss.isIncremental) ( z.q.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else "" - - inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr} - ${ ss.generateSQLBody(z.q) } - """ } val blockName = ss.resolveExtractorBlockName(zs(0)) val ext = s""" @@ -374,53 +388,59 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { List(ext) } if (ss.isQueryTerm(stmts(0).q.head.name)) - blocks :::= compileNodeRule(stmts, qs, ss) + blocks :::= compileNodeRule(stmts, ss) val inferenceRuleGroupByHead = stmts.groupBy(_.q.head.name) - + var stmtIndex = 0 for (stmt <- stmts) { - // edge query - val fakeBody = stmt.q.head +: stmt.q.body - val fakeCQ = ConjunctiveQuery(stmt.q.head, fakeBody) // we will just use the fakeBody below. 
- - val index = stmt.q.body.length + 1 - val qs2 = new QuerySchema( fakeCQ ) - val variableIdsStr = Some(s"""R0.id AS "${stmt.q.head.name}.R0.id" """) - - // weight string - val uwStr = stmt.weights match { - case KnownFactorWeight(x) => None - case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, true)).mkString(", ")) - } - - val selectStr = (List(variableIdsStr, uwStr) flatten).mkString(", ") - - val ddCount = if (ss.isIncremental) ( fakeCQ.body.zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" - - // factor input query - val inputQuery = s""" - SELECT ${selectStr} ${ddCountStr} - ${ ss.generateSQLBody(fakeCQ) }""" - - // factor function - val func = s"""Imply(${stmt.q.head.name}.R0.label)""" - - // weight - val weight = stmt.weights match { - case KnownFactorWeight(x) => s"${x}" - case UnknownFactorWeight(w) => { - s"""?(${w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, false)).mkString(", ")})""" + var inputQueries = new ListBuffer[String]() + var func = "" + var weight = "" + for (cqBody <- stmt.q.bodies) { + var tmpStmt = ConjunctiveQuery(stmt.q.head, List(cqBody)) + // edge query + val fakeBody = tmpStmt.head +: tmpStmt.bodies(0) + // val fakeBody = stmt.q.bodies +: List(stmt.q.head) + val fakeCQ = ConjunctiveQuery(tmpStmt.head, List(fakeBody)) // we will just use the fakeBody below. + + val index = tmpStmt.bodies(0).length + 1 + val qs2 = new QuerySchema( fakeCQ ) + val variableIdsStr = Some(s"""R0.id AS "${tmpStmt.head.name}.R0.id" """) + + // weight string + val uwStr = stmt.weights match { + case KnownFactorWeight(x) => None + case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, 1)).mkString(", ")) } - } + val selectStr = (List(variableIdsStr, uwStr) flatten).mkString(", ") + + val ddCount = if (ss.isIncremental) ( fakeCQ.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" + + // factor input query + inputQueries += s""" + SELECT ${selectStr} ${ddCountStr} + ${ ss.generateSQLBody(fakeCQ) }""" + // factor function + func = s"""Imply(${stmt.q.head.name}.R0.label)""" + // weight + if (weight.length == 0) + weight = stmt.weights match { + case KnownFactorWeight(x) => s"${x}" + case UnknownFactorWeight(w) => { + s"""?(${w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, 0)).mkString(", ")})""" + } + } + } blocks ::= s""" - deepdive.inference.factors.factor_${stmt.q.head.name} { - input_query: \"\"\"${inputQuery}\"\"\" + deepdive.inference.factors.factor_${stmt.q.head.name}_${stmtIndex} { + input_query: \"\"\"${inputQueries.mkString(" UNION ")}\"\"\" function: "${func}" weight: "${weight}" } """ + stmtIndex += 1 } blocks.reverse } diff --git a/DeepDiveLogDeltaDeriver.scala b/DeepDiveLogDeltaDeriver.scala index a80045d47..7bece44f3 100644 --- a/DeepDiveLogDeltaDeriver.scala +++ b/DeepDiveLogDeltaDeriver.scala @@ -36,8 +36,12 @@ object DeepDiveLogDeltaDeriver{ var newStmt = SchemaDeclaration(Attribute(newPrefix + stmt.a.name, newTerms.toList, stmt.a.types), stmt.isQuery) incrementalStatement += newStmt if (!stmt.isQuery) { - incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(newStmt.a.name, newStmt.a.terms.toList), List(Atom(stmt.a.name, stmt.a.terms.toList)))) - incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(newStmt.a.name, newStmt.a.terms.toList), 
List(Atom(deltaStmt.a.name, deltaStmt.a.terms.toList)))) + var newStmtCqBodies = new ListBuffer[List[Atom]]() + newStmtCqBodies += List(Atom(stmt.a.name, stmt.a.terms.toList)) + newStmtCqBodies += List(Atom(deltaStmt.a.name, deltaStmt.a.terms.toList)) + incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(newStmt.a.name, newStmt.a.terms.toList), newStmtCqBodies.toList)) + + // incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(newStmt.a.name, newStmt.a.terms.toList), List(Atom(deltaStmt.a.name, deltaStmt.a.terms.toList)))) } incrementalStatement.toList } @@ -80,40 +84,44 @@ object DeepDiveLogDeltaDeriver{ newStmtCqHeadTerms += Variable(headTerm.varName, deltaPrefix + headTerm.relName, headTerm.index) } var newStmtCqHead = Atom(deltaPrefix + stmt.q.head.name, newStmtCqHeadTerms.toList) - // dd delta table from dd_delta_ table - var ddDeltaStmtCqBody = new ListBuffer[Atom]() - for (stmtCqBody <- stmt.q.body) { // List[Atom] - var stmtCqBodyTerms = new ListBuffer[Variable]() - for (bodyTerm <- stmtCqBody.terms) { - stmtCqBodyTerms += Variable(bodyTerm.varName, deltaPrefix + bodyTerm.relName, bodyTerm.index) + var newStmtCqBodies = new ListBuffer[List[Atom]]() + for (stmtCqBody <- stmt.q.bodies) { + // dd delta table from dd_delta_ table + var ddDeltaStmtCqBody = new ListBuffer[Atom]() + for (cqBody <- stmtCqBody) { // List[Atom] + var stmtCqBodyTerms = new ListBuffer[Variable]() + for (bodyTerm <- cqBody.terms) { + stmtCqBodyTerms += Variable(bodyTerm.varName, deltaPrefix + bodyTerm.relName, bodyTerm.index) + } + ddDeltaStmtCqBody += Atom(deltaPrefix + cqBody.name, stmtCqBodyTerms.toList) } - ddDeltaStmtCqBody += Atom(deltaPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) - } - // dd new body from dd_new_ table - var ddNewStmtCqBody = new ListBuffer[Atom]() - for (stmtCqBody <- stmt.q.body) { // List[Atom] - var stmtCqBodyTerms = new ListBuffer[Variable]() - for (bodyTerm <- stmtCqBody.terms) { - stmtCqBodyTerms += Variable(bodyTerm.varName, newPrefix + bodyTerm.relName, bodyTerm.index) + // dd new body from dd_new_ table + var ddNewStmtCqBody = new ListBuffer[Atom]() + for (cqBody <- stmtCqBody) { // List[Atom] + var stmtCqBodyTerms = new ListBuffer[Variable]() + for (bodyTerm <- cqBody.terms) { + stmtCqBodyTerms += Variable(bodyTerm.varName, newPrefix + bodyTerm.relName, bodyTerm.index) + } + ddNewStmtCqBody += Atom(newPrefix + cqBody.name, stmtCqBodyTerms.toList) } - ddNewStmtCqBody += Atom(newPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) - } - // New statement - var i = 0 - var j = 0 - for (i <- 0 to (stmt.q.body.length - 1)) { - var newStmtCqBody = new ListBuffer[Atom]() - for (j <- 0 to (stmt.q.body.length - 1)) { - if (j > i) - newStmtCqBody += stmt.q.body(j) - else if (j < i) - newStmtCqBody += ddNewStmtCqBody(j) - else if (j == i) - newStmtCqBody += ddDeltaStmtCqBody(j) + // New statement + var i = 0 + var j = 0 + for (i <- 0 to (stmtCqBody.length - 1)) { + var newStmtCqBody = new ListBuffer[Atom]() + for (j <- 0 to (stmtCqBody.length - 1)) { + if (j > i) + newStmtCqBody += stmtCqBody(j) + else if (j < i) + newStmtCqBody += ddNewStmtCqBody(j) + else if (j == i) + newStmtCqBody += ddDeltaStmtCqBody(j) + } + newStmtCqBodies += newStmtCqBody.toList } - incrementalStatement += ExtractionRule(ConjunctiveQuery(newStmtCqHead, newStmtCqBody.toList)) } + incrementalStatement += ExtractionRule(ConjunctiveQuery(newStmtCqHead, newStmtCqBodies.toList)) incrementalStatement.toList } @@ -136,40 +144,44 @@ object DeepDiveLogDeltaDeriver{ newStmtCqHeadTerms += 
Variable(headTerm.varName, deltaPrefix + headTerm.relName, headTerm.index) } var newStmtCqHead = Atom(deltaPrefix + stmt.q.head.name, newStmtCqHeadTerms.toList) - // dd delta table from dd_delta_ table - var ddDeltaStmtCqBody = new ListBuffer[Atom]() - for (stmtCqBody <- stmt.q.body) { // List[Atom] - var stmtCqBodyTerms = new ListBuffer[Variable]() - for (bodyTerm <- stmtCqBody.terms) { - stmtCqBodyTerms += Variable(bodyTerm.varName, deltaPrefix + bodyTerm.relName, bodyTerm.index) + var newStmtCqBodies = new ListBuffer[List[Atom]]() + for (stmtCqBody <- stmt.q.bodies) { + // dd delta table from dd_delta_ table + var ddDeltaStmtCqBody = new ListBuffer[Atom]() + for (cqBody <- stmtCqBody) { // List[Atom] + var stmtCqBodyTerms = new ListBuffer[Variable]() + for (bodyTerm <- cqBody.terms) { + stmtCqBodyTerms += Variable(bodyTerm.varName, deltaPrefix + bodyTerm.relName, bodyTerm.index) + } + ddDeltaStmtCqBody += Atom(deltaPrefix + cqBody.name, stmtCqBodyTerms.toList) } - ddDeltaStmtCqBody += Atom(deltaPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) - } - // dd new body from dd_new_ table - var ddNewStmtCqBody = new ListBuffer[Atom]() - for (stmtCqBody <- stmt.q.body) { // List[Atom] - var stmtCqBodyTerms = new ListBuffer[Variable]() - for (bodyTerm <- stmtCqBody.terms) { - stmtCqBodyTerms += Variable(bodyTerm.varName, newPrefix + bodyTerm.relName, bodyTerm.index) + // dd new body from dd_new_ table + var ddNewStmtCqBody = new ListBuffer[Atom]() + for (cqBody <- stmtCqBody) { // List[Atom] + var stmtCqBodyTerms = new ListBuffer[Variable]() + for (bodyTerm <- cqBody.terms) { + stmtCqBodyTerms += Variable(bodyTerm.varName, newPrefix + bodyTerm.relName, bodyTerm.index) + } + ddNewStmtCqBody += Atom(newPrefix + cqBody.name, stmtCqBodyTerms.toList) } - ddNewStmtCqBody += Atom(newPrefix + stmtCqBody.name, stmtCqBodyTerms.toList) - } - // New statement - var i = 0 - var j = 0 - for (i <- 0 to (stmt.q.body.length - 1)) { - var newStmtCqBody = new ListBuffer[Atom]() - for (j <- 0 to (stmt.q.body.length - 1)) { - if (j > i) - newStmtCqBody += stmt.q.body(j) - else if (j < i) - newStmtCqBody += ddNewStmtCqBody(j) - else if (j == i) - newStmtCqBody += ddDeltaStmtCqBody(j) + // New statement + var i = 0 + var j = 0 + for (i <- 0 to (stmtCqBody.length - 1)) { + var newStmtCqBody = new ListBuffer[Atom]() + for (j <- 0 to (stmtCqBody.length - 1)) { + if (j > i) + newStmtCqBody += stmtCqBody(j) + else if (j < i) + newStmtCqBody += ddNewStmtCqBody(j) + else if (j == i) + newStmtCqBody += ddDeltaStmtCqBody(j) + } + newStmtCqBodies += newStmtCqBody.toList } - incrementalStatement += InferenceRule(ConjunctiveQuery(newStmtCqHead, newStmtCqBody.toList), stmt.weights, stmt.supervision, stmt.rule) } + incrementalStatement += InferenceRule(ConjunctiveQuery(newStmtCqHead, newStmtCqBodies.toList), stmt.weights, stmt.supervision, stmt.rule) incrementalStatement.toList } diff --git a/DeepDiveLogParser.scala b/DeepDiveLogParser.scala index 7329f56a2..0072c6c6d 100644 --- a/DeepDiveLogParser.scala +++ b/DeepDiveLogParser.scala @@ -11,7 +11,7 @@ case class Variable(varName : String, relName : String, index : Int ) // TODO make Atom a trait, and have multiple case classes, e.g., RelationAtom and CondExprAtom case class Atom(name : String, terms : List[Variable]) case class Attribute(name : String, terms : List[Variable], types : List[String]) -case class ConjunctiveQuery(head: Atom, body: List[Atom]) +case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]]) case class Column(name : String, t : String) sealed trait 
FactorWeight { @@ -36,7 +36,7 @@ case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statem case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration]) extends Statement case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule -case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String, rule : String) extends Statement // Weighted rule +case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String, rule : String = "imply") extends Statement // Weighted rule // Parser @@ -99,8 +99,9 @@ class DeepDiveLogParser extends JavaTokenParsers { case (headatom ~ ":-" ~ disjunctiveBodies) => // TODO handle all disjunctiveBodies // XXX only compiling the first body - val bodyatoms = disjunctiveBodies(0) - ConjunctiveQuery(headatom, bodyatoms.toList) + // val bodyatoms = disjunctiveBodies(0) + // ConjunctiveQuery(headatom, bodyatoms.toList) + ConjunctiveQuery(headatom, disjunctiveBodies) } def relationType: Parser[RelationType] = diff --git a/DeepDiveLogPrettyPrinter.scala b/DeepDiveLogPrettyPrinter.scala index d387d9fc8..5b3171b9a 100644 --- a/DeepDiveLogPrettyPrinter.scala +++ b/DeepDiveLogPrettyPrinter.scala @@ -49,8 +49,11 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { val vars = a.terms map { _.varName } s"${a.name}(${vars.mkString(", ")})" } + val printListAtom = {a:List[Atom] => + s"${(a map printAtom).mkString(",\n ")}" + } s"""${printAtom(cq.head)} :- - | ${(cq.body map printAtom).mkString(",\n ")}""".stripMargin + | ${(cq.bodies map printListAtom).mkString(";\n ")}""".stripMargin } def print(stmt: ExtractionRule): String = { From 7dcd16d01bd4766a84b1dbda1eae4b1e33893b24 Mon Sep 17 00:00:00 2001 From: senwu Date: Thu, 21 May 2015 15:34:00 -0700 Subject: [PATCH 082/347] change rule to semantics in example --- examples/spouse_example.ddl | 2 +- examples/spouse_example.print-incremental.expected | 2 +- examples/spouse_example.print.expected | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/spouse_example.ddl b/examples/spouse_example.ddl index 94a0266e6..1e910528d 100644 --- a/examples/spouse_example.ddl +++ b/examples/spouse_example.ddl @@ -93,4 +93,4 @@ has_spouse(rid) :- has_spouse_features(rid, f) weight = f label = l -rule = imply. +semantics = imply. diff --git a/examples/spouse_example.print-incremental.expected b/examples/spouse_example.print-incremental.expected index 0b2a5e9e2..0dececb3e 100644 --- a/examples/spouse_example.print-incremental.expected +++ b/examples/spouse_example.print-incremental.expected @@ -256,4 +256,4 @@ dd_delta_has_spouse(rid) :- dd_delta_has_spouse_features(rid, f) weight = f label = l - rule = imply. + semantics = imply. diff --git a/examples/spouse_example.print.expected b/examples/spouse_example.print.expected index ba4d0d933..eefd87be5 100644 --- a/examples/spouse_example.print.expected +++ b/examples/spouse_example.print.expected @@ -88,4 +88,4 @@ has_spouse(rid) :- has_spouse_features(rid, f) weight = f label = l - rule = imply. + semantics = imply. 
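Before the parser itself is updated in the next patch, it is worth pinning down what the two changes in flight look like together: a rule head may now carry several alternative bodies (bodies : List[List[Atom]]), and the examples now spell the factor-function clause `semantics = imply` instead of `rule = imply`. As a rough illustration of the resulting AST (a hand-built value using the case classes above; the relation names are made up), the rule `Q(x) :- R(x, y); S(x, z)` parses into one ConjunctiveQuery with one inner list per disjunct:

    // hypothetical AST for:  Q(x) :- R(x, y); S(x, z)
    val head  = Atom("Q", List(Variable("x", "Q", 0)))
    val bodyR = List(Atom("R", List(Variable("x", "R", 0), Variable("y", "R", 1))))
    val bodyS = List(Atom("S", List(Variable("x", "S", 0), Variable("z", "S", 1))))
    val q     = ConjunctiveQuery(head, List(bodyR, bodyS))

Each inner List[Atom] is still one conjunctive body; the compiler emits one SQL query per body and joins them with UNION, as the factor-compilation change above already shows.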
From 3d18125b67c72faa27d6adf86d5ce5da1ff43578 Mon Sep 17 00:00:00 2001
From: senwu
Date: Thu, 21 May 2015 15:35:33 -0700
Subject: [PATCH 083/347] change rule to semantics in parser and printer

---
 .../scala/org/deepdive/ddlog/DeepDiveLogParser.scala | 12 ++++++------
 .../deepdive/ddlog/DeepDiveLogPrettyPrinter.scala | 4 ++--
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
index 2eec10f71..4b84267d4 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
@@ -38,7 +38,7 @@ case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statem
 case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration]) extends Statement
 case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule
 case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule
-case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String, rule : String = "imply") extends Statement // Weighted rule
+case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String, semantics : String = "imply") extends Statement // Weighted rule


 // Parser
@@ -60,7 +60,7 @@ class DeepDiveLogParser extends JavaTokenParsers {
   }
   def variableName = ident
   def functionName = ident
-  def ruleType = ident
+  def semanticType = ident

   def columnDeclaration: Parser[Column] =
     columnName ~ columnType ^^ {
@@ -148,13 +148,13 @@ class DeepDiveLogParser extends JavaTokenParsers {

   def supervision = "label" ~> "=" ~> variableName

-  def rule = "rule" ~> "=" ~> ruleType
+  def semantics = "semantics" ~> "=" ~> semanticType

   def inferenceRule : Parser[InferenceRule] =
-    ( conjunctiveQuery ~ factorWeight ~ supervision ~ rule
+    ( conjunctiveQuery ~ factorWeight ~ supervision ~ semantics
     ) ^^ {
-      case (q ~ weight ~ supervision ~ rule) =>
-        InferenceRule(q, weight, supervision, rule)
+      case (q ~ weight ~ supervision ~ semantics) =>
+        InferenceRule(q, weight, supervision, semantics)
     }

   // rules or schema elements in arbitrary order
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
index e7b52565c..f0b2a944b 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
@@ -79,8 +79,8 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler {
       ( if (stmt.supervision == null) ""
         else "\n  label = " + stmt.supervision
       ) +
-      ( if (stmt.rule == null) ""
-        else "\n  rule = " + stmt.rule
+      ( if (stmt.semantics == null) ""
+        else "\n  semantics = " + stmt.semantics
       ) +
       "."
} From cc852dfd3dbc6f3a6e2341eff5f4be31f5d7bc4a Mon Sep 17 00:00:00 2001 From: senwu Date: Thu, 21 May 2015 15:36:31 -0700 Subject: [PATCH 084/347] refactor derivation function --- .../ddlog/DeepDiveLogDeltaDeriver.scala | 244 ++++++++---------- 1 file changed, 105 insertions(+), 139 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index a1fa2dbc8..3508b784c 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -16,34 +16,102 @@ object DeepDiveLogDeltaDeriver{ case s: InferenceRule => transform(s) } + def transform(cq: ConjunctiveQuery): ConjunctiveQuery = { + // New head + val incCqHead = cq.head.copy( + name = deltaPrefix + cq.head.name, + terms = cq.head.terms map {term => term.copy(relName = deltaPrefix + term.relName)} + ) + var incCqBodies = new ListBuffer[List[Atom]]() + // New incremental bodies + for (body <- cq.bodies) { + // Delta body + val incDeltaBody = body map { + a => a.copy( + name = deltaPrefix + a.name, + terms = a.terms map {term => term.copy(relName = deltaPrefix + term.relName)} + ) + } + // New body + val incNewBody = body map { + a => a.copy( + name = newPrefix + a.name, + terms = a.terms map {term => term.copy(relName = newPrefix + term.relName)} + ) + } + var i = 0 + var j = 0 + for (i <- 0 to (body.length - 1)) { + var newBody = new ListBuffer[Atom]() + for (j <- 0 to (body.length - 1)) { + if (j > i) + newBody += body(j) + else if (j < i) + newBody += incNewBody(j) + else if (j == i) + newBody += incDeltaBody(j) + } + incCqBodies += newBody.toList + } + } + ConjunctiveQuery(incCqHead, incCqBodies.toList) + } + // Incremental scheme declaration, // keep the original scheme and create one delta scheme def transform(stmt: SchemaDeclaration): List[Statement] = { var incrementalStatement = new ListBuffer[Statement]() - // Origin table - incrementalStatement += stmt + // Incremental table + val incStmt = stmt.copy( + a = stmt.a.copy( + terms = stmt.isQuery match { + case true => stmt.a.terms + case false => stmt.a.terms :+ Variable("dd_count", stmt.a.name, stmt.a.terms.length) + }, + types = stmt.isQuery match { + case true => stmt.a.types + case false => stmt.a.types :+ "int" + }) + ) + incrementalStatement += incStmt + // Delta table - var deltaTerms = new ListBuffer[Variable]() - for (term <- stmt.a.terms) { - deltaTerms += Variable(term.varName, deltaPrefix + term.relName, term.index) - } - var deltaStmt = SchemaDeclaration(Attribute(deltaPrefix + stmt.a.name, deltaTerms.toList, stmt.a.types), stmt.isQuery) - - incrementalStatement += deltaStmt + val incDeltaStmt = stmt.copy( + a = stmt.a.copy( + name = deltaPrefix + stmt.a.name, + terms = stmt.isQuery match { + case true => stmt.a.terms map {term => term.copy(relName = deltaPrefix + term.relName)} + case false => (stmt.a.terms map {term => term.copy(relName = deltaPrefix + term.relName)}) :+ + Variable("dd_count", deltaPrefix + stmt.a.name, stmt.a.terms.length) + }, + types = stmt.isQuery match { + case true => stmt.a.types + case false => stmt.a.types :+ "int" + } + ) + ) + incrementalStatement += incDeltaStmt + // New table - val newTerms = new ListBuffer[Variable]() - for (term <- stmt.a.terms) { - newTerms += Variable(term.varName, newPrefix + term.relName, term.index) - } - var newStmt = SchemaDeclaration(Attribute(newPrefix + stmt.a.name, newTerms.toList, stmt.a.types), stmt.isQuery) - incrementalStatement 
+= newStmt + val incNewStmt = stmt.copy( + a = stmt.a.copy( + name = newPrefix + stmt.a.name, + terms = stmt.isQuery match { + case true => stmt.a.terms map {term => term.copy(relName = newPrefix + term.relName)} + case false => (stmt.a.terms map {term => term.copy(relName = newPrefix + term.relName)}) :+ + Variable("dd_count", newPrefix + stmt.a.name, stmt.a.terms.length) + }, + types = stmt.isQuery match { + case true => stmt.a.types + case false => stmt.a.types :+ "int" + } + ) + ) + incrementalStatement += incNewStmt + if (!stmt.isQuery) { - var newStmtCqBodies = new ListBuffer[List[Atom]]() - newStmtCqBodies += List(Atom(stmt.a.name, stmt.a.terms.toList)) - newStmtCqBodies += List(Atom(deltaStmt.a.name, deltaStmt.a.terms.toList)) - incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(newStmt.a.name, newStmt.a.terms.toList), newStmtCqBodies.toList)) - - // incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(newStmt.a.name, newStmt.a.terms.toList), List(Atom(deltaStmt.a.name, deltaStmt.a.terms.toList)))) + incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(incNewStmt.a.name, incNewStmt.a.terms), + List(List(Atom(incStmt.a.name, incStmt.a.terms)), List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms))))) } incrementalStatement.toList } @@ -51,140 +119,38 @@ object DeepDiveLogDeltaDeriver{ // Incremental function declaration, // create one delta function scheme based on original function scheme def transform(stmt: FunctionDeclaration): List[Statement] = { - var incrementalStatement = new ListBuffer[Statement]() - var newTerms = new ListBuffer[Variable]() - var newInputType: RelationType = stmt.inputType match { - case inTy: RelationTypeDeclaration => { - var newNames = new ListBuffer[String]() - for (name <- inTy.names) - newNames += deltaPrefix + name - RelationTypeDeclaration(newNames.toList, inTy.types) - } - case inTy: RelationTypeAlias => RelationTypeAlias(deltaPrefix + inTy.likeRelationName) - } - var newOutputType: RelationType = stmt.outputType match { - case outTy: RelationTypeDeclaration => { - var newNames = new ListBuffer[String]() - for (name <- outTy.names) - newNames += deltaPrefix + name - RelationTypeDeclaration(newNames.toList, outTy.types) + List(stmt.copy( + inputType = stmt.inputType match { + case inTy: RelationTypeDeclaration => + inTy.copy(names = inTy.names map {name => deltaPrefix + name}) + case inTy: RelationTypeAlias => + inTy.copy(likeRelationName = deltaPrefix + inTy.likeRelationName) + }, + outputType = stmt.outputType match { + case outTy: RelationTypeDeclaration => + outTy.copy(names = outTy.names map {name => deltaPrefix + name}) + case outTy: RelationTypeAlias => + outTy.copy(likeRelationName = deltaPrefix + outTy.likeRelationName) } - case outTy: RelationTypeAlias => RelationTypeAlias(deltaPrefix + outTy.likeRelationName) - } - incrementalStatement += FunctionDeclaration(stmt.functionName, newInputType, newOutputType, stmt.implementations) - incrementalStatement.toList + )) } // Incremental extraction rule, // create delta rules based on original extraction rule def transform(stmt: ExtractionRule): List[Statement] = { - var incrementalStatement = new ListBuffer[Statement]() - - // New head - var newStmtCqHeadTerms = new ListBuffer[Variable]() - for (headTerm <- stmt.q.head.terms) { - newStmtCqHeadTerms += Variable(headTerm.varName, deltaPrefix + headTerm.relName, headTerm.index) - } - var newStmtCqHead = Atom(deltaPrefix + stmt.q.head.name, newStmtCqHeadTerms.toList) - var newStmtCqBodies = new ListBuffer[List[Atom]]() - for 
(stmtCqBody <- stmt.q.bodies) { - // dd delta table from dd_delta_ table - var ddDeltaStmtCqBody = new ListBuffer[Atom]() - for (cqBody <- stmtCqBody) { // List[Atom] - var stmtCqBodyTerms = new ListBuffer[Variable]() - for (bodyTerm <- cqBody.terms) { - stmtCqBodyTerms += Variable(bodyTerm.varName, deltaPrefix + bodyTerm.relName, bodyTerm.index) - } - ddDeltaStmtCqBody += Atom(deltaPrefix + cqBody.name, stmtCqBodyTerms.toList) - } - // dd new body from dd_new_ table - var ddNewStmtCqBody = new ListBuffer[Atom]() - for (cqBody <- stmtCqBody) { // List[Atom] - var stmtCqBodyTerms = new ListBuffer[Variable]() - for (bodyTerm <- cqBody.terms) { - stmtCqBodyTerms += Variable(bodyTerm.varName, newPrefix + bodyTerm.relName, bodyTerm.index) - } - ddNewStmtCqBody += Atom(newPrefix + cqBody.name, stmtCqBodyTerms.toList) - } - - // New statement - var i = 0 - var j = 0 - for (i <- 0 to (stmtCqBody.length - 1)) { - var newStmtCqBody = new ListBuffer[Atom]() - for (j <- 0 to (stmtCqBody.length - 1)) { - if (j > i) - newStmtCqBody += stmtCqBody(j) - else if (j < i) - newStmtCqBody += ddNewStmtCqBody(j) - else if (j == i) - newStmtCqBody += ddDeltaStmtCqBody(j) - } - newStmtCqBodies += newStmtCqBody.toList - } - } - incrementalStatement += ExtractionRule(ConjunctiveQuery(newStmtCqHead, newStmtCqBodies.toList)) - incrementalStatement.toList + List(ExtractionRule(transform(stmt.q))) } // Incremental function call rule, // modify function input and output def transform(stmt: FunctionCallRule): List[Statement] = { - var incrementalStatement = new ListBuffer[Statement]() - incrementalStatement += FunctionCallRule(deltaPrefix + stmt.input, deltaPrefix + stmt.output, stmt.function) - incrementalStatement.toList + List(FunctionCallRule(deltaPrefix + stmt.input, deltaPrefix + stmt.output, stmt.function)) } // Incremental inference rule, // create delta rules based on original extraction rule def transform(stmt: InferenceRule): List[Statement] = { - var incrementalStatement = new ListBuffer[Statement]() - - // New head - var newStmtCqHeadTerms = new ListBuffer[Variable]() - for (headTerm <- stmt.q.head.terms) { - newStmtCqHeadTerms += Variable(headTerm.varName, deltaPrefix + headTerm.relName, headTerm.index) - } - var newStmtCqHead = Atom(deltaPrefix + stmt.q.head.name, newStmtCqHeadTerms.toList) - var newStmtCqBodies = new ListBuffer[List[Atom]]() - for (stmtCqBody <- stmt.q.bodies) { - // dd delta table from dd_delta_ table - var ddDeltaStmtCqBody = new ListBuffer[Atom]() - for (cqBody <- stmtCqBody) { // List[Atom] - var stmtCqBodyTerms = new ListBuffer[Variable]() - for (bodyTerm <- cqBody.terms) { - stmtCqBodyTerms += Variable(bodyTerm.varName, deltaPrefix + bodyTerm.relName, bodyTerm.index) - } - ddDeltaStmtCqBody += Atom(deltaPrefix + cqBody.name, stmtCqBodyTerms.toList) - } - // dd new body from dd_new_ table - var ddNewStmtCqBody = new ListBuffer[Atom]() - for (cqBody <- stmtCqBody) { // List[Atom] - var stmtCqBodyTerms = new ListBuffer[Variable]() - for (bodyTerm <- cqBody.terms) { - stmtCqBodyTerms += Variable(bodyTerm.varName, newPrefix + bodyTerm.relName, bodyTerm.index) - } - ddNewStmtCqBody += Atom(newPrefix + cqBody.name, stmtCqBodyTerms.toList) - } - - // New statement - var i = 0 - var j = 0 - for (i <- 0 to (stmtCqBody.length - 1)) { - var newStmtCqBody = new ListBuffer[Atom]() - for (j <- 0 to (stmtCqBody.length - 1)) { - if (j > i) - newStmtCqBody += stmtCqBody(j) - else if (j < i) - newStmtCqBody += ddNewStmtCqBody(j) - else if (j == i) - newStmtCqBody += ddDeltaStmtCqBody(j) - } - 
newStmtCqBodies += newStmtCqBody.toList - } - } - incrementalStatement += InferenceRule(ConjunctiveQuery(newStmtCqHead, newStmtCqBodies.toList), stmt.weights, stmt.supervision, stmt.rule) - incrementalStatement.toList + List(InferenceRule(transform(stmt.q), stmt.weights, stmt.supervision, stmt.semantics)) } def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = { From 57ae857529d2b12e5689c035ff362a4e047f2299 Mon Sep 17 00:00:00 2001 From: senwu Date: Thu, 21 May 2015 16:01:53 -0700 Subject: [PATCH 085/347] refactor compile function --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 62 +++++++++++-------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 6ee7c18ef..e177de92a 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -62,6 +62,12 @@ import scala.collection.immutable.HashMap import org.apache.commons.lang3.StringEscapeUtils import scala.collection.mutable.ListBuffer +object AliasStyle extends Enumeration { + type AliasStyle = Value + val OriginalOnly, AliasOnly, OriginalAndAlias = Value +} +import AliasStyle._ + // This handles the schema statements. // It can tell you if a predicate is a "query" predicate or a "ground prediate" // and it resolves Variables their correct and true name in the schema, i.e. R(x,y) then x could be Attribute1 declared. @@ -89,7 +95,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C var inferenceRuleGroupByHead : Map[String, List[InferenceRule]] = new HashMap[String, List[InferenceRule]]() var functionCallRuleGroupByInput : Map[String, List[FunctionCallRule]] = new HashMap[String, List[FunctionCallRule]]() var functionCallRuleGroupByOutput : Map[String, List[FunctionCallRule]] = new HashMap[String, List[FunctionCallRule]]() - + def init() = { // generate the statements. isIncremental = config.isIncremental @@ -148,7 +154,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } // Resolve a column name with alias - def resolveColumn(s: String, qs: QuerySchema, q : ConjunctiveQuery, alias: Int) : Option[String] = { + def resolveColumn(s: String, qs: QuerySchema, q : ConjunctiveQuery, alias: AliasStyle) : Option[String] = { val index = qs.getBodyIndex(s) val name = resolveName(qs.getVar(s)) val relation = q.bodies(0)(index).name @@ -157,9 +163,9 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // 1 => use column name and alias for normal extraction rule // 2 => use column name for dd_new_ tables alias match { - case 0 => Some(s"${relation}.R${index}.${name}") - case 1 => Some(s"""R${index}.${name} AS "${relation}.R${index}.${name}" """) - case 2 => Some(s"R${index}.${name}") + case AliasStyle.OriginalOnly => Some(s"R${index}.${name}") + case AliasStyle.AliasOnly => Some(s"${relation}.R${index}.${name}") + case AliasStyle.OriginalAndAlias => Some(s"""R${index}.${name} AS "${relation}.R${index}.${name}" """) } } @@ -283,25 +289,29 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var inputQueries = new ListBuffer[String]() for (stmt <- stmts) { for (cqBody <- stmt.q.bodies) { - var tmpStmt = ConjunctiveQuery(stmt.q.head, List(cqBody)) + var tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody)) // Generate the body of the query. 
- val qs = new QuerySchema( tmpStmt ) + val qs = new QuerySchema( tmpCq ) // variable columns - var resolveColumnFlag = tmpStmt.head.name.startsWith("dd_new_") match { - case true => 2 - case false => 1 + // dd_new_ tale only need original column name to make sure the schema is the same with original table + val tmpCqIsForNewTable = tmpCq.head.name.startsWith("dd_new_") + var resolveColumnFlag = tmpCqIsForNewTable match { + case true => AliasStyle.OriginalOnly + case false => AliasStyle.OriginalAndAlias } - val variableCols = tmpStmt.head.terms flatMap { - case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, tmpStmt, resolveColumnFlag) + val variableCols = tmpCq.head.terms flatMap { + case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, tmpCq, resolveColumnFlag) } val selectStr = variableCols.mkString(", ") - val ddCount = if (ss.isIncremental && (resolveColumnFlag == 1)) ( tmpStmt.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + // additional dd_count column will be added in incremental version not dd_new_ table + // dd_new_ table does not need additional dd_count column + val ddCount = if (ss.isIncremental && !tmpCqIsForNewTable) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" inputQueries += s""" SELECT ${selectStr}${ddCountStr} - ${ ss.generateSQLBody(tmpStmt) }""" + ${ ss.generateSQLBody(tmpCq) }""" } } val blockName = ss.resolveExtractorBlockName(stmts(0)) @@ -359,20 +369,20 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var inputQueries = new ListBuffer[String]() for (z <- zs) { for (cqBody <- z.q.bodies) { - var tmpStmt = ConjunctiveQuery(z.q.head, List(cqBody)) - val qs = new QuerySchema(tmpStmt) - val headTerms = tmpStmt.head.terms map { + var tmpCq = ConjunctiveQuery(z.q.head, List(cqBody)) + val qs = new QuerySchema(tmpCq) + val headTerms = tmpCq.head.terms map { case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" } val index = qs.getBodyIndex(z.supervision) val name = ss.resolveName(qs.getVar(z.supervision)) val labelCol = s"R${index}.${name}" val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") - val ddCount = if (ss.isIncremental) ( tmpStmt.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCount = if (ss.isIncremental) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else "" inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr} - ${ ss.generateSQLBody(tmpStmt) } + ${ ss.generateSQLBody(tmpCq) } """ } } @@ -399,20 +409,20 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var func = "" var weight = "" for (cqBody <- stmt.q.bodies) { - var tmpStmt = ConjunctiveQuery(stmt.q.head, List(cqBody)) + var tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody)) // edge query - val fakeBody = tmpStmt.head +: tmpStmt.bodies(0) + val fakeBody = tmpCq.head +: tmpCq.bodies(0) // val fakeBody = stmt.q.bodies +: List(stmt.q.head) - val fakeCQ = ConjunctiveQuery(tmpStmt.head, List(fakeBody)) // we will just use the fakeBody below. + val fakeCQ = ConjunctiveQuery(tmpCq.head, List(fakeBody)) // we will just use the fakeBody below. 
- val index = tmpStmt.bodies(0).length + 1 + val index = tmpCq.bodies(0).length + 1 val qs2 = new QuerySchema( fakeCQ ) - val variableIdsStr = Some(s"""R0.id AS "${tmpStmt.head.name}.R0.id" """) + val variableIdsStr = Some(s"""R0.id AS "${tmpCq.head.name}.R0.id" """) // weight string val uwStr = stmt.weights match { case KnownFactorWeight(x) => None - case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, 1)).mkString(", ")) + case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, AliasStyle.OriginalAndAlias)).mkString(", ")) } val selectStr = (List(variableIdsStr, uwStr) flatten).mkString(", ") @@ -431,7 +441,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { weight = stmt.weights match { case KnownFactorWeight(x) => s"${x}" case UnknownFactorWeight(w) => { - s"""?(${w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, 0)).mkString(", ")})""" + s"""?(${w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, AliasStyle.AliasOnly)).mkString(", ")})""" } } } From b83824e92a31a2d8bca1277eab725607f1e5d804 Mon Sep 17 00:00:00 2001 From: senwu Date: Thu, 21 May 2015 16:07:07 -0700 Subject: [PATCH 086/347] minor changes --- .../scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index e177de92a..2dcf267e8 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -227,7 +227,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // Analyze the dependency between statements and construct a graph. def analyzeDependency(statements: List[Statement]) = { - var stmtByHeadName = (extractionRuleGroupByHead.toSeq ++ inferenceRuleGroupByHead.toSeq ++ functionCallRuleGroupByOutput.toSeq).groupBy(_._1).mapValues(_.map(_._2).toList) + val stmtByHeadName = (extractionRuleGroupByHead.toSeq ++ inferenceRuleGroupByHead.toSeq ++ functionCallRuleGroupByOutput.toSeq).groupBy(_._1).mapValues(_.map(_._2).toList) // Look at the body of each statement to construct a dependency graph statements foreach { @@ -289,13 +289,13 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var inputQueries = new ListBuffer[String]() for (stmt <- stmts) { for (cqBody <- stmt.q.bodies) { - var tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody)) + val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody)) // Generate the body of the query. 
val qs = new QuerySchema( tmpCq ) // variable columns // dd_new_ tale only need original column name to make sure the schema is the same with original table val tmpCqIsForNewTable = tmpCq.head.name.startsWith("dd_new_") - var resolveColumnFlag = tmpCqIsForNewTable match { + val resolveColumnFlag = tmpCqIsForNewTable match { case true => AliasStyle.OriginalOnly case false => AliasStyle.OriginalAndAlias } @@ -369,7 +369,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var inputQueries = new ListBuffer[String]() for (z <- zs) { for (cqBody <- z.q.bodies) { - var tmpCq = ConjunctiveQuery(z.q.head, List(cqBody)) + val tmpCq = ConjunctiveQuery(z.q.head, List(cqBody)) val qs = new QuerySchema(tmpCq) val headTerms = tmpCq.head.terms map { case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" @@ -409,7 +409,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var func = "" var weight = "" for (cqBody <- stmt.q.bodies) { - var tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody)) + val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody)) // edge query val fakeBody = tmpCq.head +: tmpCq.bodies(0) // val fakeBody = stmt.q.bodies +: List(stmt.q.head) From e23d99ed8f84edd87661cba7b80aaf9dd9e2012a Mon Sep 17 00:00:00 2001 From: senwu Date: Thu, 21 May 2015 16:58:26 -0700 Subject: [PATCH 087/347] refactor --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 32 ++++++------ .../ddlog/DeepDiveLogDeltaDeriver.scala | 51 ++++++++----------- 2 files changed, 38 insertions(+), 45 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 2dcf267e8..f8cab1cbd 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -64,7 +64,12 @@ import scala.collection.mutable.ListBuffer object AliasStyle extends Enumeration { type AliasStyle = Value + // Three kinds of column expressions: + // OriginalOnly => use column name for dd_new_ tables + // AliasOnly => use alias for unknown weight in inference + // OriginalAndAlias => use column name and alias for normal extraction rule val OriginalOnly, AliasOnly, OriginalAndAlias = Value + } import AliasStyle._ @@ -158,14 +163,10 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C val index = qs.getBodyIndex(s) val name = resolveName(qs.getVar(s)) val relation = q.bodies(0)(index).name - // Three kinds of column expressions: - // 0 => use alias for unknown weight in inference - // 1 => use column name and alias for normal extraction rule - // 2 => use column name for dd_new_ tables alias match { - case AliasStyle.OriginalOnly => Some(s"R${index}.${name}") - case AliasStyle.AliasOnly => Some(s"${relation}.R${index}.${name}") - case AliasStyle.OriginalAndAlias => Some(s"""R${index}.${name} AS "${relation}.R${index}.${name}" """) + case OriginalOnly => Some(s"R${index}.${name}") + case AliasOnly => Some(s"${relation}.R${index}.${name}") + case OriginalAndAlias => Some(s"""R${index}.${name} AS "${relation}.R${index}.${name}" """) } } @@ -296,8 +297,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { // dd_new_ tale only need original column name to make sure the schema is the same with original table val tmpCqIsForNewTable = tmpCq.head.name.startsWith("dd_new_") val resolveColumnFlag = tmpCqIsForNewTable match { - case true => AliasStyle.OriginalOnly - case false => AliasStyle.OriginalAndAlias + case true => OriginalOnly + case false => 
OriginalAndAlias } val variableCols = tmpCq.head.terms flatMap { case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, tmpCq, resolveColumnFlag) @@ -409,20 +410,19 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var func = "" var weight = "" for (cqBody <- stmt.q.bodies) { - val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody)) // edge query - val fakeBody = tmpCq.head +: tmpCq.bodies(0) + val fakeBody = stmt.q.head +: cqBody // val fakeBody = stmt.q.bodies +: List(stmt.q.head) - val fakeCQ = ConjunctiveQuery(tmpCq.head, List(fakeBody)) // we will just use the fakeBody below. + val fakeCQ = ConjunctiveQuery(stmt.q.head, List(fakeBody)) // we will just use the fakeBody below. - val index = tmpCq.bodies(0).length + 1 + val index = cqBody.length + 1 val qs2 = new QuerySchema( fakeCQ ) - val variableIdsStr = Some(s"""R0.id AS "${tmpCq.head.name}.R0.id" """) + val variableIdsStr = Some(s"""R0.id AS "${stmt.q.head.name}.R0.id" """) // weight string val uwStr = stmt.weights match { case KnownFactorWeight(x) => None - case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, AliasStyle.OriginalAndAlias)).mkString(", ")) + case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, OriginalAndAlias)).mkString(", ")) } val selectStr = (List(variableIdsStr, uwStr) flatten).mkString(", ") @@ -441,7 +441,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { weight = stmt.weights match { case KnownFactorWeight(x) => s"${x}" case UnknownFactorWeight(w) => { - s"""?(${w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, AliasStyle.AliasOnly)).mkString(", ")})""" + s"""?(${w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, AliasOnly)).mkString(", ")})""" } } } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index 3508b784c..fda08cc32 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -62,49 +62,42 @@ object DeepDiveLogDeltaDeriver{ def transform(stmt: SchemaDeclaration): List[Statement] = { var incrementalStatement = new ListBuffer[Statement]() // Incremental table - val incStmt = stmt.copy( + val incStmt = if (stmt.isQuery) stmt else stmt.copy( a = stmt.a.copy( - terms = stmt.isQuery match { - case true => stmt.a.terms - case false => stmt.a.terms :+ Variable("dd_count", stmt.a.name, stmt.a.terms.length) - }, - types = stmt.isQuery match { - case true => stmt.a.types - case false => stmt.a.types :+ "int" - }) + terms = stmt.a.terms :+ Variable("dd_count", stmt.a.name, stmt.a.terms.length), + types = stmt.a.types :+ "int" + ) ) incrementalStatement += incStmt // Delta table - val incDeltaStmt = stmt.copy( + var incDeltaStmt = stmt.copy( a = stmt.a.copy( name = deltaPrefix + stmt.a.name, - terms = stmt.isQuery match { - case true => stmt.a.terms map {term => term.copy(relName = deltaPrefix + term.relName)} - case false => (stmt.a.terms map {term => term.copy(relName = deltaPrefix + term.relName)}) :+ - Variable("dd_count", deltaPrefix + stmt.a.name, stmt.a.terms.length) - }, - types = stmt.isQuery match { - case true => stmt.a.types - case false => stmt.a.types :+ "int" - } + terms = stmt.a.terms map {term => term.copy(relName = deltaPrefix + term.relName)}, + types = stmt.a.types + ) + ) + if (!stmt.isQuery) incDeltaStmt = incDeltaStmt.copy( + a = incDeltaStmt.a.copy( + terms = incDeltaStmt.a.terms :+ Variable("dd_count", deltaPrefix + stmt.a.name, 
stmt.a.terms.length), + types = incDeltaStmt.a.types :+ "int" ) ) incrementalStatement += incDeltaStmt // New table - val incNewStmt = stmt.copy( + var incNewStmt = stmt.copy( a = stmt.a.copy( name = newPrefix + stmt.a.name, - terms = stmt.isQuery match { - case true => stmt.a.terms map {term => term.copy(relName = newPrefix + term.relName)} - case false => (stmt.a.terms map {term => term.copy(relName = newPrefix + term.relName)}) :+ - Variable("dd_count", newPrefix + stmt.a.name, stmt.a.terms.length) - }, - types = stmt.isQuery match { - case true => stmt.a.types - case false => stmt.a.types :+ "int" - } + terms = stmt.a.terms map {term => term.copy(relName = newPrefix + term.relName)}, + types = stmt.a.types + ) + ) + if (!stmt.isQuery) incNewStmt = incNewStmt.copy( + a = incNewStmt.a.copy( + terms = stmt.a.terms :+ Variable("dd_count", newPrefix + stmt.a.name, stmt.a.terms.length), + types = stmt.a.types :+ "int" ) ) incrementalStatement += incNewStmt From 69c64e7fe7cb810cf461324ced34cd2d7aa884f1 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 00:35:41 -0700 Subject: [PATCH 088/347] Corrects confusing bats test names for --incremental --- test/expected-output-test.bats.per-example | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/expected-output-test.bats.per-example b/test/expected-output-test.bats.per-example index a6dfe2361..5df8042a3 100644 --- a/test/expected-output-test.bats.per-example +++ b/test/expected-output-test.bats.per-example @@ -46,7 +46,7 @@ setup() { ## tests for --incremental support # compare the compiled output of the incremental version with what's expected -@test "print $EXAMPLE_NAME as expected" { +@test "compile --incremental $EXAMPLE_NAME as expected" { expectedOutput=$EXAMPLE_BASEPATH.compile-incremental.expected [ -e "$expectedOutput" ] || skip scala "$DDLOG_JAR" compile --incremental "$EXAMPLE" | @@ -54,9 +54,9 @@ setup() { } # compare the pretty-printed output of the incremental version with what's expected -@test "print $EXAMPLE_NAME as expected" { +@test "print --incremental $EXAMPLE_NAME as expected" { expectedOutput=$EXAMPLE_BASEPATH.print-incremental.expected [ -e "$expectedOutput" ] || skip scala "$DDLOG_JAR" print --incremental "$EXAMPLE" | diff -u "$expectedOutput" - -} \ No newline at end of file +} From eb9708110ac25ad8d7e02d0486001756bfe0c79a Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 00:41:48 -0700 Subject: [PATCH 089/347] Cleans up tests - Removes stale test examples - Corrects some expected outputs - Renames to more meaningful names - test.sh will clean up generated ones that don't exist any longer --- ...pected => rstu.print-incremental.expected} | 0 examples/spouse_incremental_example.ddl | 104 ------------------ examples/{test9.ddl => test-semantics.ddl} | 2 +- examples/test1.ddl | 4 - examples/test2.ddl | 7 -- examples/test3.ddl | 19 ---- examples/test4.ddl | 18 --- examples/test5.ddl | 16 --- test/test.sh | 1 + 9 files changed, 2 insertions(+), 169 deletions(-) rename examples/{rstu.delta.expected => rstu.print-incremental.expected} (100%) delete mode 100644 examples/spouse_incremental_example.ddl rename examples/{test9.ddl => test-semantics.ddl} (90%) delete mode 100644 examples/test1.ddl delete mode 100644 examples/test2.ddl delete mode 100644 examples/test3.ddl delete mode 100644 examples/test4.ddl delete mode 100644 examples/test5.ddl diff --git a/examples/rstu.delta.expected b/examples/rstu.print-incremental.expected similarity index 100% rename from 
examples/rstu.delta.expected rename to examples/rstu.print-incremental.expected diff --git a/examples/spouse_incremental_example.ddl b/examples/spouse_incremental_example.ddl deleted file mode 100644 index 11503d7d9..000000000 --- a/examples/spouse_incremental_example.ddl +++ /dev/null @@ -1,104 +0,0 @@ -articles( - article_id text, - text text, - dd_count int). - -sentences( - document_id text, - sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], - sentence_offset int, - sentence_id text, - dd_count int). - -people_mentions( - sentence_id text, - start_position int, - length int, - text text, - mention_id text, - dd_count int). - -has_spouse_candidates( - person1_id text, - person2_id text, - sentence_id text, - description text, - relation_id text, - is_true boolean, - dd_count int). - -has_spouse_features( - relation_id text, - feature text, - dd_count int). - -has_spouse?(relation_id text). - -people_mentions :- - !ext_people(ext_people_input). - -ext_people_input( - sentence_id text, - words text[], - ner_tags text[], - dd_count int). - -ext_people_input(s, words, ner_tags) :- - sentences(a, b, words, c, d, e, ner_tags, f, s). - -function ext_people over like ext_people_input - returns like people_mentions - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. - -has_spouse_candidates :- - !ext_has_spouse(ext_has_spouse_input). - -ext_has_spouse_input( - sentence_id text, - p1_id text, - p1_text text, - p2_id text, - p2_text text, - dd_count int). - -ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- - people_mentions(s, a, b, p1_text, p1_id), - people_mentions(s, c, d, p2_text, p2_id). - -function ext_has_spouse over like ext_has_spouse_input - returns like has_spouse_candidates - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. - -has_spouse_features :- - !ext_has_spouse_features(ext_has_spouse_features_input). - -ext_has_spouse_features_input( - words text[], - relation_id text, - p1_start_position int, - p1_length int, - p2_start_position int, - p2_length int, - dd_count int). - -ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- - sentences(a, b, words, c, d, e, f, g, s), - has_spouse_candidates(person1_id, person2_id, s, h, rid, x), - people_mentions(s, p1idx, p1len, k, person1_id), - people_mentions(s, p2idx, p2len, l, person2_id). - -function ext_has_spouse_features over like ext_has_spouse_features_input - returns like has_spouse_features - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. - -has_spouse(rid) :- - has_spouse_candidates(a, b, c, d, rid, l), - has_spouse_features(rid, f) -weight = f -label = l -rule = imply. diff --git a/examples/test9.ddl b/examples/test-semantics.ddl similarity index 90% rename from examples/test9.ddl rename to examples/test-semantics.ddl index e93f7ad0f..3de8d36d1 100644 --- a/examples/test9.ddl +++ b/examples/test-semantics.ddl @@ -6,4 +6,4 @@ Q?(x int). Q(x) :- R(x, y, z); R(x, y, z), S(y, z, w); S(y, x, w), T(x, z, w) weight = y label = z -rule = imply. \ No newline at end of file +semantics = imply. 
diff --git a/examples/test1.ddl b/examples/test1.ddl deleted file mode 100644 index 2e0e20fdc..000000000 --- a/examples/test1.ddl +++ /dev/null @@ -1,4 +0,0 @@ - S(a1,a2); - R(pk,f)!; - Q(x) :- R(x,f) weight=f; - Q2(x) :- R(x, f), S(x, y) weight = f diff --git a/examples/test2.ddl b/examples/test2.ddl deleted file mode 100644 index 6add95d11..000000000 --- a/examples/test2.ddl +++ /dev/null @@ -1,7 +0,0 @@ - S(a1,a2); - R(pk,f); - Q(x) :- R(x,f) weight=f; - Q(x) :- S(x,y),T(y); - T(base_attr)!; - R(y,x) :- U(x,y); - S(x,y) :- R(x,y); diff --git a/examples/test3.ddl b/examples/test3.ddl deleted file mode 100644 index a5fdbc227..000000000 --- a/examples/test3.ddl +++ /dev/null @@ -1,19 +0,0 @@ - has_spouse(person1_id, person2_id, sentence_id, description, is_true, relation_id); - has_spouse_features(relation_id, feature); - q(rid)!; - - q(y) :- - has_spouse(a, b, c, d, x, y), - has_spouse_features(y, f) - weight = f - label = x; - q(y) :- - has_spouse(a, b, c, d, x, y), - has_spouse_features(y, f) - weight = f - label = x; - - // f_has_spouse_symmetry(x, y) :- - // has_spouse(a1, a2, a3, a4, x, a6), - // has_spouse(a2, a1, b3, b4, y, b6) - // weight = 1; diff --git a/examples/test4.ddl b/examples/test4.ddl deleted file mode 100644 index 47989f676..000000000 --- a/examples/test4.ddl +++ /dev/null @@ -1,18 +0,0 @@ - articles(article_id, text); - sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id); - people_mentions(sentence_id, start_position, length, text, mention_id); - has_spouse(person1_id, person2_id, sentence_id, description, is_true, relation_id, id); - has_spouse_features(relation_id, feature); - people_mentions(sentence_id, words, ner_tags):- - sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) - udf=ext_people; - has_spouse(sentence_id, p1.mention_id, p1.text, p2.mention_id, p2.text):- - people_mentions(sentence_id, p1.start_position, p1.length, p1.text, p1.mention_id), - people_mentions(sentence_id, p2.start_position, p2.length, p2.text, p2.mention_id) - udf=ext_has_spouse; - has_spouse_features(words, relation_id, p1.start_position, p1.length, p2.start_position, p2.length):- - sentences(s.document_id, s.sentence, words, s.lemma, s.pos_tags, s.dependencies, s.ner_tags, s.sentence_offset, sentence_id), - has_spouse(person1_id, person2_id, sentence_id, h.description, h.is_true, relation_id, h.id), - people_mentions(sentence_id, p1.start_position, p1.length, p1.text, person1_id), - people_mentions(sentence_id, p2.start_position, p2.length, p2.text, person2_id) - udf=ext_has_spouse_features; diff --git a/examples/test5.ddl b/examples/test5.ddl deleted file mode 100644 index 55144dda4..000000000 --- a/examples/test5.ddl +++ /dev/null @@ -1,16 +0,0 @@ - ext_people_input( - sentence_id, - words, - ner_tags). - function ext_has_spouse_features over like ext_has_spouse_features_input - returns like has_spouse_features - implementation udf/ext_has_spouse_features.py handles tsv lines. - function ext_people over like ext_people_input - returns like people_mentions - implementation udf/ext_people.py handles tsv lines. - ext_people_input(sentence_id, words, ner_tags):- - sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id). - people_mentions :- - !ext_people(ext_people_input). - people_mentions_1 :- - !ext_people(people_mentions). 
diff --git a/test/test.sh b/test/test.sh index 6fe8347cc..643142895 100755 --- a/test/test.sh +++ b/test/test.sh @@ -8,6 +8,7 @@ type bats &>/dev/null || git clone https://github.com/sstephenson/bats.git # generate bats tests for every per-example templates +rm -f *.for-example-*.bats for t in *.bats.per-example; do testName=${t%.bats.per-example} # generate one for each example From fb9650fa52402ab3e5928ce1b0763cd60ca5603f Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 00:40:12 -0700 Subject: [PATCH 090/347] Improves parser to support comments C/Java/Scala style comments: // single line or /* multi line */ as well as shell script style comments: # single line --- examples/test-comments.ddl | 16 ++++++++++++++++ examples/test-comments.print.expected | 9 +++++++++ .../org/deepdive/ddlog/DeepDiveLogParser.scala | 4 ++++ 3 files changed, 29 insertions(+) create mode 100644 examples/test-comments.ddl create mode 100644 examples/test-comments.print.expected diff --git a/examples/test-comments.ddl b/examples/test-comments.ddl new file mode 100644 index 000000000..147644b75 --- /dev/null +++ b/examples/test-comments.ddl @@ -0,0 +1,16 @@ +# This is a comment +// This is also a comment + +R(a int). +S( a int # this is an integer field + , b text // seond one is a text field + , c /* third one is a float field */ float +). + +R(x) :- S(x, y, z). + +/* +Multi +-line +comments + /* unfortunately cannot be nested */ diff --git a/examples/test-comments.print.expected b/examples/test-comments.print.expected new file mode 100644 index 000000000..8b8213f4c --- /dev/null +++ b/examples/test-comments.print.expected @@ -0,0 +1,9 @@ +R(a int). + +S(a int, + b text, + c float). + +R(x) :- + S(x, y, z). + diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index 4b84267d4..221b1cfd5 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -52,6 +52,10 @@ class DeepDiveLogParser extends JavaTokenParsers { s.stripPrefix("\"").stripSuffix("\"")) } + // C/Java/Scala-style as well as shell script-style comments are supported + // by treating them as whiteSpace + protected override val whiteSpace = """(?:(?:^|\s+)#.*|//.*|(?m)/\*(\*(?!/)|[^*])*\*/|\s)+""".r + // We just use Java identifiers to parse various names def relationName = ident def columnName = ident From 74f8959f19f694136c98cff14bb8b2da4de2053e Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 03:48:03 -0700 Subject: [PATCH 091/347] Makes expected output test leave actual output --- .gitignore | 1 + test/expected-output-test.bats.per-example | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/.gitignore b/.gitignore index 67ba8778f..91b68d153 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ target /test/bats /test/*.for-example-*.bats +/examples/*.actual diff --git a/test/expected-output-test.bats.per-example b/test/expected-output-test.bats.per-example index 5df8042a3..fe5e0d12f 100644 --- a/test/expected-output-test.bats.per-example +++ b/test/expected-output-test.bats.per-example @@ -23,6 +23,7 @@ setup() { expectedOutput=$EXAMPLE_BASEPATH.compile.expected [ -e "$expectedOutput" ] || skip scala "$DDLOG_JAR" compile "$EXAMPLE" | + tee "${expectedOutput%.expected}.actual" | diff -u "$expectedOutput" - } @@ -31,6 +32,7 @@ setup() { expectedOutput=$EXAMPLE_BASEPATH.print.expected [ -e "$expectedOutput" ] || skip scala "$DDLOG_JAR" print "$EXAMPLE" | + tee 
"${expectedOutput%.expected}.actual" | diff -u "$expectedOutput" - } @@ -50,6 +52,7 @@ setup() { expectedOutput=$EXAMPLE_BASEPATH.compile-incremental.expected [ -e "$expectedOutput" ] || skip scala "$DDLOG_JAR" compile --incremental "$EXAMPLE" | + tee "${expectedOutput%.expected}.actual" | diff -u "$expectedOutput" - } @@ -58,5 +61,6 @@ setup() { expectedOutput=$EXAMPLE_BASEPATH.print-incremental.expected [ -e "$expectedOutput" ] || skip scala "$DDLOG_JAR" print --incremental "$EXAMPLE" | + tee "${expectedOutput%.expected}.actual" | diff -u "$expectedOutput" - } From 198afe0faa55f6329ca47a8223b5029743db4eb6 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 03:48:32 -0700 Subject: [PATCH 092/347] Fixes a test case to use multiple bodies for delta rules --- examples/rstu.print-incremental.expected | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/examples/rstu.print-incremental.expected b/examples/rstu.print-incremental.expected index 7c5ac4359..c0e725b31 100644 --- a/examples/rstu.print-incremental.expected +++ b/examples/rstu.print-incremental.expected @@ -23,11 +23,9 @@ dd_delta_T(x text, dd_new_T(x text, f text). -dd_new_T(x, f) :- - T(x, f). - -dd_new_T(x, f) :- - dd_delta_T(x, f). +dd_new_T(x, f, dd_count) :- + T(x, f, dd_count); + dd_delta_T(x, f, dd_count). U(x text, l text). @@ -43,14 +41,10 @@ R(x) :- dd_delta_R(x) :- dd_delta_S(x), T(x, f), - U(x, l). - -dd_delta_R(x) :- + U(x, l); dd_new_S(x), dd_delta_T(x, f), - U(x, l). - -dd_delta_R(x) :- + U(x, l); dd_new_S(x), dd_new_T(x, f), dd_delta_U(x, l). From f40d64a84ab30ee4ac256d8d8a7660113d887a47 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 03:59:00 -0700 Subject: [PATCH 093/347] Removes redundant test examples --- examples/{test8.ddl => test-multibody.ddl} | 0 examples/test.ddl | 8 -------- examples/test6.ddl | 5 ----- examples/test7.ddl | 6 ------ 4 files changed, 19 deletions(-) rename examples/{test8.ddl => test-multibody.ddl} (100%) delete mode 100644 examples/test.ddl delete mode 100644 examples/test6.ddl delete mode 100644 examples/test7.ddl diff --git a/examples/test8.ddl b/examples/test-multibody.ddl similarity index 100% rename from examples/test8.ddl rename to examples/test-multibody.ddl diff --git a/examples/test.ddl b/examples/test.ddl deleted file mode 100644 index bf6b427cf..000000000 --- a/examples/test.ddl +++ /dev/null @@ -1,8 +0,0 @@ -A(a1 int, - a2 int). -B(a1 int, - a2 int). -C(a1 int, - a2 int, - a3 int). -Q(a1) :- A(a1, x); B(y, a1); C(a, b, a1). diff --git a/examples/test6.ddl b/examples/test6.ddl deleted file mode 100644 index 35820ed03..000000000 --- a/examples/test6.ddl +++ /dev/null @@ -1,5 +0,0 @@ -R(a int, b int). -S(a int, b int). -Q(x int). - -Q(y) :- R(x, y); R(x, y), S(y, z). \ No newline at end of file diff --git a/examples/test7.ddl b/examples/test7.ddl deleted file mode 100644 index 6c1c7f0fe..000000000 --- a/examples/test7.ddl +++ /dev/null @@ -1,6 +0,0 @@ -R(a int, b int). -S(a int, b int). -T(a int, b int). -Q(x int). - -Q(y) :- R(x, y); R(x, y), S(y, z); S(y, x), T(x, z). 
\ No newline at end of file From 24c80282c1febef01544f7ab38bc9b43e7c8a9ba Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 04:01:00 -0700 Subject: [PATCH 094/347] Renames a test case to many_joins --- examples/{rstu.ddl => test-many_joins.ddl} | 0 ...mental.expected => test-many_joins.print-incremental.expected} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename examples/{rstu.ddl => test-many_joins.ddl} (100%) rename examples/{rstu.print-incremental.expected => test-many_joins.print-incremental.expected} (100%) diff --git a/examples/rstu.ddl b/examples/test-many_joins.ddl similarity index 100% rename from examples/rstu.ddl rename to examples/test-many_joins.ddl diff --git a/examples/rstu.print-incremental.expected b/examples/test-many_joins.print-incremental.expected similarity index 100% rename from examples/rstu.print-incremental.expected rename to examples/test-many_joins.print-incremental.expected From 567ca5c6d7d21a76064844834c374eaedf48bc6e Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 04:03:12 -0700 Subject: [PATCH 095/347] Adds a test for parsing every example --- test/expected-output-test.bats.per-example | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/test/expected-output-test.bats.per-example b/test/expected-output-test.bats.per-example index fe5e0d12f..61b49201d 100644 --- a/test/expected-output-test.bats.per-example +++ b/test/expected-output-test.bats.per-example @@ -18,6 +18,20 @@ setup() { ## tests for basic compilation and pretty-printing +# check if example can be parsed +@test "parse $EXAMPLE_NAME" { + scala "$DDLOG_JAR" print "$EXAMPLE" >/dev/null +} + +# check if print is idempotent +@test "print $EXAMPLE_NAME is idempotent" { + printed=$BATS_TMPDIR/ddlog-$EXAMPLE_NAME-printed.ddl + scala "$DDLOG_JAR" print "$EXAMPLE" >"$printed" || skip + scala "$DDLOG_JAR" print "$printed" | + diff -u "$printed" - +} + + # compare the compiled output with what's expected @test "compile $EXAMPLE_NAME" { expectedOutput=$EXAMPLE_BASEPATH.compile.expected @@ -36,14 +50,6 @@ setup() { diff -u "$expectedOutput" - } -# check if print is idempotent -@test "print $EXAMPLE_NAME is idempotent" { - printed=$BATS_TMPDIR/ddlog-$EXAMPLE_NAME-printed.ddl - scala "$DDLOG_JAR" print "$EXAMPLE" >"$printed" || skip - scala "$DDLOG_JAR" print "$printed" | - diff -u "$printed" - -} - ## tests for --incremental support From e1aaff493cd7bcfb28814b1786f8c7e0c0dd0546 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 04:51:18 -0700 Subject: [PATCH 096/347] Moves expected output tests under test/ - Each set of input with expected outputs is now put under a standard directory, which makes it super easy to add more tests by cloning existing ones. 
--- .gitignore | 5 +- test/expected-output-test.bats.per-example | 72 ---------------- test/expected-output-test.bats.template | 85 +++++++++++++++++++ test/expected-output-test/README.md | 13 +++ .../expected-output-test/many_joins/input.ddl | 0 .../many_joins/print-incremental.expected | 0 .../expected-output-test/multibody/input.ddl | 0 .../expected-output-test/semantics/input.ddl | 0 .../compile-incremental.expected | 0 .../spouse_example/compile.expected | 0 .../spouse_example/input.ddl | 1 + .../spouse_example/print-incremental.expected | 0 .../spouse_example/print.expected | 0 test/test.sh | 27 +++--- 14 files changed, 115 insertions(+), 88 deletions(-) delete mode 100644 test/expected-output-test.bats.per-example create mode 100644 test/expected-output-test.bats.template create mode 100644 test/expected-output-test/README.md rename examples/test-many_joins.ddl => test/expected-output-test/many_joins/input.ddl (100%) rename examples/test-many_joins.print-incremental.expected => test/expected-output-test/many_joins/print-incremental.expected (100%) rename examples/test-multibody.ddl => test/expected-output-test/multibody/input.ddl (100%) rename examples/test-semantics.ddl => test/expected-output-test/semantics/input.ddl (100%) rename examples/spouse_example.compile-incremental.expected => test/expected-output-test/spouse_example/compile-incremental.expected (100%) rename examples/spouse_example.compile.expected => test/expected-output-test/spouse_example/compile.expected (100%) create mode 120000 test/expected-output-test/spouse_example/input.ddl rename examples/spouse_example.print-incremental.expected => test/expected-output-test/spouse_example/print-incremental.expected (100%) rename examples/spouse_example.print.expected => test/expected-output-test/spouse_example/print.expected (100%) diff --git a/.gitignore b/.gitignore index 91b68d153..9ccfab0ac 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ /ddlog-test.jar /ddlog-test.jar.classpath target + /test/bats -/test/*.for-example-*.bats -/examples/*.actual +/test/expected-output-test/**/*.bats +/test/expected-output-test/**/*.actual diff --git a/test/expected-output-test.bats.per-example b/test/expected-output-test.bats.per-example deleted file mode 100644 index 61b49201d..000000000 --- a/test/expected-output-test.bats.per-example +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env bats -# Per example tests -# -# This test compares outputs of various modes of ddlog against a .ddl example file with its expected output. -# Test is skipped if no expected output is found. 
-
-# required variables
-: ${DDLOG_JAR:?path to ddlog jar}
-: ${EXAMPLE:?path to input ddlog program}
-EXAMPLE_BASEPATH=${EXAMPLE%.ddl}
-EXAMPLE_NAME=${EXAMPLE_BASEPATH##*/}
-
-# some preconditions
-setup() {
-  [ -e "$DDLOG_JAR" ]
-  [ -e "$EXAMPLE" ]
-}
-
-## tests for basic compilation and pretty-printing
-
-# check if example can be parsed
-@test "parse $EXAMPLE_NAME" {
-  scala "$DDLOG_JAR" print "$EXAMPLE" >/dev/null
-}
-
-# check if print is idempotent
-@test "print $EXAMPLE_NAME is idempotent" {
-  printed=$BATS_TMPDIR/ddlog-$EXAMPLE_NAME-printed.ddl
-  scala "$DDLOG_JAR" print "$EXAMPLE" >"$printed" || skip
-  scala "$DDLOG_JAR" print "$printed" |
-  diff -u "$printed" -
-}
-
-
-# compare the compiled output with what's expected
-@test "compile $EXAMPLE_NAME" {
-  expectedOutput=$EXAMPLE_BASEPATH.compile.expected
-  [ -e "$expectedOutput" ] || skip
-  scala "$DDLOG_JAR" compile "$EXAMPLE" |
-  tee "${expectedOutput%.expected}.actual" |
-  diff -u "$expectedOutput" -
-}
-
-# compare the pretty-printed output with what's expected
-@test "print $EXAMPLE_NAME as expected" {
-  expectedOutput=$EXAMPLE_BASEPATH.print.expected
-  [ -e "$expectedOutput" ] || skip
-  scala "$DDLOG_JAR" print "$EXAMPLE" |
-  tee "${expectedOutput%.expected}.actual" |
-  diff -u "$expectedOutput" -
-}
-
-
-## tests for --incremental support
-
-# compare the compiled output of the incremental version with what's expected
-@test "compile --incremental $EXAMPLE_NAME as expected" {
-  expectedOutput=$EXAMPLE_BASEPATH.compile-incremental.expected
-  [ -e "$expectedOutput" ] || skip
-  scala "$DDLOG_JAR" compile --incremental "$EXAMPLE" |
-  tee "${expectedOutput%.expected}.actual" |
-  diff -u "$expectedOutput" -
-}
-
-# compare the pretty-printed output of the incremental version with what's expected
-@test "print --incremental $EXAMPLE_NAME as expected" {
-  expectedOutput=$EXAMPLE_BASEPATH.print-incremental.expected
-  [ -e "$expectedOutput" ] || skip
-  scala "$DDLOG_JAR" print --incremental "$EXAMPLE" |
-  tee "${expectedOutput%.expected}.actual" |
-  diff -u "$expectedOutput" -
-}
diff --git a/test/expected-output-test.bats.template b/test/expected-output-test.bats.template
new file mode 100644
index 000000000..fc3ae84f5
--- /dev/null
+++ b/test/expected-output-test.bats.template
@@ -0,0 +1,85 @@
+#!/usr/bin/env bats
+# Expected output test with Bats
+#
+# These test cases compare outputs of various modes of ddlog against a .ddl example file with its expected output.
+# Tests are skipped if no expected output is found.
+#
+# This .bats.template file should be run by creating a .bats symlink to it
+# next to a directory with the same name (without .bats) that contains
+# input.ddl.
+ +# required variables +: ${DDLOG_JAR:?path to ddlog jar} + +# some shorthands +cd "$BATS_TEST_DIRNAME" +TESTDIR=${BATS_TEST_FILENAME%.bats} +TESTDIR=${TESTDIR#$PWD/} +TEST=${BATS_TEST_FILENAME#$PWD/} +it="${TEST%.bats}:" + +# how to invoke ddlog compiler +ddlog() { + scala "$DDLOG_JAR" "$@" +} + +# some preconditions +setup() { + [ -e "$DDLOG_JAR" ] + [ -e "$TESTDIR" ] +} + +## tests for basic compilation and pretty-printing + +# check if example can be parsed +@test "$it parses input" { + ddlog print "$TESTDIR"/input.ddl >/dev/null +} + +# check if print is idempotent +@test "$it parses and prints what it prints (idempotent)" { + printed=$TESTDIR/print-idempotent.actual + ddlog print "$TESTDIR"/input.ddl >"$printed" || skip + ddlog print "$printed" | + diff -u "$printed" - +} + + +# compare the compiled output with what's expected +@test "$it compiles input as expected" { + expectedOutput=$TESTDIR/compile.expected + [ -e "$expectedOutput" ] || skip + ddlog compile "$TESTDIR"/input.ddl | + tee "${expectedOutput%.expected}.actual" | + diff -u "$expectedOutput" - +} + +# compare the pretty-printed output with what's expected +@test "$it prints input as expected" { + expectedOutput=$TESTDIR/print.expected + [ -e "$expectedOutput" ] || skip + ddlog print "$TESTDIR"/input.ddl | + tee "${expectedOutput%.expected}.actual" | + diff -u "$expectedOutput" - +} + + +## tests for --incremental support + +# compare the compiled output of the incremental version with what's expected +@test "$it compiles --incremental input as expected" { + expectedOutput=$TESTDIR/compile-incremental.expected + [ -e "$expectedOutput" ] || skip + ddlog compile --incremental "$TESTDIR"/input.ddl | + tee "${expectedOutput%.expected}.actual" | + diff -u "$expectedOutput" - +} + +# compare the pretty-printed output of the incremental version with what's expected +@test "$it prints --incremental input as expected" { + expectedOutput=$TESTDIR/print-incremental.expected + [ -e "$expectedOutput" ] || skip + ddlog print --incremental "$TESTDIR"/input.ddl | + tee "${expectedOutput%.expected}.actual" | + diff -u "$expectedOutput" - +} diff --git a/test/expected-output-test/README.md b/test/expected-output-test/README.md new file mode 100644 index 000000000..2e9915962 --- /dev/null +++ b/test/expected-output-test/README.md @@ -0,0 +1,13 @@ +Here are test cases for running the "expected-output-test". + +Each test case is a directory that contains the following files: + +* `input.ddl` is the input DDLog program. + * Parser is tested against every input program. + * If the input is supposed to cause a parse error, `parse.error.expected` containing an expected error message should be there. +* `print.expected` is the expected output of running the `print` command. Test is skipped if not available. +* `compile.expected` is the expected output of running the `compile` command. Test is skipped if not available. +* `print-incremental.expected` is the expected output of running the `print --incremental` command. Test is skipped if not available. +* `compile-incremental.expected` is the expected output of running the `compile --incremental` command. Test is skipped if not available. + +The tests will create `.actual` files corresponding to each `.expected` file. 
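For example, the `spouse_example` spec moved below carries the full set of files described above; a spec only needs the subset of `.expected` files it wants checked:

```bash
ls test/expected-output-test/spouse_example/
# compile-incremental.expected  compile.expected  input.ddl
# print-incremental.expected    print.expected
```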
diff --git a/examples/test-many_joins.ddl b/test/expected-output-test/many_joins/input.ddl similarity index 100% rename from examples/test-many_joins.ddl rename to test/expected-output-test/many_joins/input.ddl diff --git a/examples/test-many_joins.print-incremental.expected b/test/expected-output-test/many_joins/print-incremental.expected similarity index 100% rename from examples/test-many_joins.print-incremental.expected rename to test/expected-output-test/many_joins/print-incremental.expected diff --git a/examples/test-multibody.ddl b/test/expected-output-test/multibody/input.ddl similarity index 100% rename from examples/test-multibody.ddl rename to test/expected-output-test/multibody/input.ddl diff --git a/examples/test-semantics.ddl b/test/expected-output-test/semantics/input.ddl similarity index 100% rename from examples/test-semantics.ddl rename to test/expected-output-test/semantics/input.ddl diff --git a/examples/spouse_example.compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected similarity index 100% rename from examples/spouse_example.compile-incremental.expected rename to test/expected-output-test/spouse_example/compile-incremental.expected diff --git a/examples/spouse_example.compile.expected b/test/expected-output-test/spouse_example/compile.expected similarity index 100% rename from examples/spouse_example.compile.expected rename to test/expected-output-test/spouse_example/compile.expected diff --git a/test/expected-output-test/spouse_example/input.ddl b/test/expected-output-test/spouse_example/input.ddl new file mode 120000 index 000000000..63c01729c --- /dev/null +++ b/test/expected-output-test/spouse_example/input.ddl @@ -0,0 +1 @@ +../../../examples/spouse_example.ddl \ No newline at end of file diff --git a/examples/spouse_example.print-incremental.expected b/test/expected-output-test/spouse_example/print-incremental.expected similarity index 100% rename from examples/spouse_example.print-incremental.expected rename to test/expected-output-test/spouse_example/print-incremental.expected diff --git a/examples/spouse_example.print.expected b/test/expected-output-test/spouse_example/print.expected similarity index 100% rename from examples/spouse_example.print.expected rename to test/expected-output-test/spouse_example/print.expected diff --git a/test/test.sh b/test/test.sh index 643142895..2567f1771 100755 --- a/test/test.sh +++ b/test/test.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash set -eu +shopt -s nullglob cd "$(dirname "$0")" # make sure bats is available @@ -7,21 +8,19 @@ PATH="$PWD/bats/bin:$PATH" type bats &>/dev/null || git clone https://github.com/sstephenson/bats.git -# generate bats tests for every per-example templates -rm -f *.for-example-*.bats -for t in *.bats.per-example; do - testName=${t%.bats.per-example} - # generate one for each example - for ddl in ../examples/*.ddl; do - exampleName=${ddl%.ddl} - exampleName=${exampleName#../examples/} - batsFile="$testName".for-example-"$exampleName".bats - { - printf "EXAMPLE=%q\n" "$ddl" - cat $t - } >$batsFile +# instantiate bats tests templates under its directory +for t in *.bats.template; do + [[ -e "$t" ]] || continue + testSpecDir=${t%.bats.template} + rm -f "$testSpecDir"/*.bats + # create a .bats symlink for each test specification + for testSpec in "$testSpecDir"/*/input.ddl; do + [[ -e "$testSpec" ]] || continue + testSpec=${testSpec%/input.ddl} + batsFile="$testSpec".bats + ln -sfn ../"$t" "$batsFile" done done # run all .bats tests -bats *.bats +bats *.bats 
*/*.bats

From 8fd0cd195578d60a56ac501e9bb17bb261397a50 Mon Sep 17 00:00:00 2001
From: Jaeho Shin
Date: Sat, 23 May 2015 05:05:46 -0700
Subject: [PATCH 097/347] Uses wdiff (word diff) when it can in expected output tests

---
 test/expected-output-test.bats.template | 20 +++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/test/expected-output-test.bats.template b/test/expected-output-test.bats.template
index fc3ae84f5..1d39f1c80 100644
--- a/test/expected-output-test.bats.template
+++ b/test/expected-output-test.bats.template
@@ -22,6 +22,16 @@ it="${TEST%.bats}:"
 ddlog() {
   scala "$DDLOG_JAR" "$@"
 }
+# how to diff (prefer wdiff)
+if type wdiff &>/dev/null; then
+  diff() {
+    wdiff --terminal --statistics -- "$@"
+  }
+else
+  diff() {
+    command diff --unified --ignore-all-space -- "$@"
+  }
+fi
 
 # some preconditions
 setup() {
@@ -41,7 +51,7 @@ setup() {
   printed=$TESTDIR/print-idempotent.actual
   ddlog print "$TESTDIR"/input.ddl >"$printed" || skip
   ddlog print "$printed" |
-  diff -u "$printed" -
+  diff "$printed" -
 }
 
 
@@ -51,7 +61,7 @@
   [ -e "$expectedOutput" ] || skip
   ddlog compile "$TESTDIR"/input.ddl |
   tee "${expectedOutput%.expected}.actual" |
-  diff -u "$expectedOutput" -
+  diff "$expectedOutput" -
 }
 
 # compare the pretty-printed output with what's expected
@@ -60,7 +70,7 @@
   [ -e "$expectedOutput" ] || skip
   ddlog print "$TESTDIR"/input.ddl |
   tee "${expectedOutput%.expected}.actual" |
-  diff -u "$expectedOutput" -
+  diff "$expectedOutput" -
 }
 
 
@@ -72,7 +82,7 @@
   [ -e "$expectedOutput" ] || skip
   ddlog compile --incremental "$TESTDIR"/input.ddl |
   tee "${expectedOutput%.expected}.actual" |
-  diff -u "$expectedOutput" -
+  diff "$expectedOutput" -
 }
 
 # compare the pretty-printed output of the incremental version with what's expected
@@ -81,5 +91,5 @@
   [ -e "$expectedOutput" ] || skip
   ddlog print --incremental "$TESTDIR"/input.ddl |
   tee "${expectedOutput%.expected}.actual" |
-  diff -u "$expectedOutput" -
+  diff "$expectedOutput" -
 }

From 14f90a685ffd8878aaec2f01ea98923525cc142b Mon Sep 17 00:00:00 2001
From: Jaeho Shin
Date: Sat, 23 May 2015 05:20:34 -0700
Subject: [PATCH 098/347] Updates test/README

---
 test/README.md | 5 +++--
 test/test.sh   | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/test/README.md b/test/README.md
index 84d2e324d..24a3b94a3 100644
--- a/test/README.md
+++ b/test/README.md
@@ -4,5 +4,6 @@ DDLog tests
 `test.sh` runs all tests.
 
 [Bats](https://github.com/sstephenson/bats.git) is used for most end-to-end tests.
-Every `*-example.bats` will be run for each `.ddl` example under `../examples/`, with the `EXAMPLE` environment variable set to the path to the example.
-Rest of the `.bats` tests will run once.
+Before running Bats, `.bats` files will be prepared for each `*.bats.template` that has a directory with the same name (without `.bats.template`) containing test spec subdirectories.
+For each test spec, a `.bats` symlink pointing to the `.bats.template` is created, which is in turn run with Bats.
+This way, creating new tests for different inputs is very easy: we can simply clone existing test specs.
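A minimal sketch of the preparation step this describes, mirroring what the `test.sh` hunk below does for one test spec (a relative symlink to the shared template, which Bats then executes):

```bash
ln -sfn ../expected-output-test.bats.template \
        expected-output-test/spouse_example.bats
bats expected-output-test/spouse_example.bats
```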
diff --git a/test/test.sh b/test/test.sh index 2567f1771..408322d64 100755 --- a/test/test.sh +++ b/test/test.sh @@ -14,8 +14,8 @@ for t in *.bats.template; do testSpecDir=${t%.bats.template} rm -f "$testSpecDir"/*.bats # create a .bats symlink for each test specification - for testSpec in "$testSpecDir"/*/input.ddl; do - [[ -e "$testSpec" ]] || continue + for testSpec in "$testSpecDir"/*; do + [[ -d "$testSpec" ]] || continue testSpec=${testSpec%/input.ddl} batsFile="$testSpec".bats ln -sfn ../"$t" "$batsFile" From 6eba6eb8d3a2693635988658e874e14a3659d86d Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 05:34:30 -0700 Subject: [PATCH 099/347] Adds a TravisCI config --- .travis.yml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..1d605eaaa --- /dev/null +++ b/.travis.yml @@ -0,0 +1,5 @@ +scala: + - "2.11.1" +before_install: + - sudo apt-get install -qq make bats wdiff +script: make From 3e680fa5dfd4320ecd55b722868db840b925a779 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 05:36:09 -0700 Subject: [PATCH 100/347] Adds a TravisCI badge to README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9c4866568..d2b647407 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -DeepDiveLogCompiler +DeepDiveLogCompiler [![Build Status](https://travis-ci.org/HazyResearch/ddlog.svg)](https://travis-ci.org/HazyResearch/ddlog) =================== A compiler that enables writing DeepDive apps in a Datalog-like syntax. From b5b2431a19db2c9a87a16e91ba8e04162c582e45 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 06:16:26 -0700 Subject: [PATCH 101/347] Makes test directly use .class files not packaged jar and improves make clean to do a better job --- .gitignore | 2 -- Makefile | 30 +++++++++++++------------ test/expected-output-test.bats.template | 5 ++--- 3 files changed, 18 insertions(+), 19 deletions(-) diff --git a/.gitignore b/.gitignore index 9ccfab0ac..2fb25d6c0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,4 @@ /ddlog.jar -/ddlog-test.jar -/ddlog-test.jar.classpath target /test/bats diff --git a/Makefile b/Makefile index 32c9c3e24..595218e61 100644 --- a/Makefile +++ b/Makefile @@ -1,35 +1,37 @@ # Makefile for DeepDiveLogCompiler +SOURCE_DIR = src/main/scala/org/deepdive/ddlog +TARGET_DIR = target/scala-2.10/classes +TEST_CLASSPATH_CACHE = $(TARGET_DIR)/dependency-classpath JAR = ddlog.jar -TEST_JAR = ddlog-test.jar -TEST_CLASSPATH_CACHE = ddlog-test.jar.classpath # test .PHONY: test -test: $(TEST_JAR) $(TEST_CLASSPATH_CACHE) - DDLOG_JAR=$(realpath $<) \ -CLASSPATH=$(shell cat $(TEST_CLASSPATH_CACHE)) \ +test: $(TARGET_DIR) $(TEST_CLASSPATH_CACHE) + CLASSPATH=$(realpath $<):$(shell cat $(TEST_CLASSPATH_CACHE)) \ +TEST_CLASS_OR_JAR=org.deepdive.ddlog.DeepDiveLog \ test/test.sh $(TEST_CLASSPATH_CACHE): build.sbt $(wildcard project/*.sbt) sbt "export compile:dependency-classpath" | tail -1 >$@ +$(TARGET_DIR): $(wildcard $(SOURCE_DIR)/*.scala) + sbt compile # test standalone package .PHONY: test-package test-package: $(JAR) - $(MAKE) test TEST_JAR=$< - -# build test jar -$(TEST_JAR): $(wildcard src/main/scala/org/deepdive/ddlog/*.scala) - sbt package - ln -sfn $$(ls -t target/scala-*/*_*.jar | head -1) $@ - touch $@ + CLASSPATH= \ +TEST_CLASS_OR_JAR=$(realpath $(JAR)) \ +test/test.sh # build standalone jar -$(JAR): $(wildcard *.scala) - sbt assembly +$(JAR): $(wildcard $(SOURCE_DIR)/*.scala) 
+ sbt clean assembly ln -sfn $$(ls -t target/scala-*/*-assembly-*.jar | head -1) $@ touch $@ .PHONY: clean clean: sbt clean + # clean test artifacts + rm -f $(JAR) $(TEST_CLASSPATH_CACHE) $(wildcard test/*/*/*.actual) + find test/ -name '*.bats' -type l -exec rm -f {} + diff --git a/test/expected-output-test.bats.template b/test/expected-output-test.bats.template index 1d39f1c80..acc0320df 100644 --- a/test/expected-output-test.bats.template +++ b/test/expected-output-test.bats.template @@ -9,7 +9,7 @@ # input.ddl. # required variables -: ${DDLOG_JAR:?path to ddlog jar} +: ${TEST_CLASS_OR_JAR:?class name or path to the ddlog.jar to test} # some shorthands cd "$BATS_TEST_DIRNAME" @@ -20,7 +20,7 @@ it="${TEST%.bats}:" # how to invoke ddlog compiler ddlog() { - scala "$DDLOG_JAR" "$@" + scala "$TEST_CLASS_OR_JAR" "$@" } # how to diff (prefer wdiff) if type wdiff &>/dev/null; then @@ -35,7 +35,6 @@ fi # some preconditions setup() { - [ -e "$DDLOG_JAR" ] [ -e "$TESTDIR" ] } From af13f75b63637480f1456a3ff0705373884d6d13 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 07:05:19 -0700 Subject: [PATCH 102/347] Fixes TravisCI config, adds Coveralls --- .travis.yml | 7 +++++-- Makefile | 18 ++++++++++++++++++ README.md | 27 ++++++++++++++++++--------- project/build.properties | 1 + project/build.sbt | 5 +++++ 5 files changed, 47 insertions(+), 11 deletions(-) create mode 100644 project/build.properties create mode 100644 project/build.sbt diff --git a/.travis.yml b/.travis.yml index 1d605eaaa..881c92d55 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,8 @@ scala: - "2.11.1" before_install: - - sudo apt-get install -qq make bats wdiff -script: make + - sudo apt-get install -qq make wdiff +script: + - make +after_success: + - sbt coverageReport coveralls diff --git a/Makefile b/Makefile index 595218e61..dd6d958d4 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,23 @@ test/test.sh $(TEST_CLASSPATH_CACHE): build.sbt $(wildcard project/*.sbt) sbt "export compile:dependency-classpath" | tail -1 >$@ $(TARGET_DIR): $(wildcard $(SOURCE_DIR)/*.scala) +ifndef MEASURE_COVERAGE sbt compile +else + # enabling coverage measurement + sbt coverage compile +endif + +# test coverage report from a clean build +.PHONY: coverage-report test-coverage +coverage-report: test-coverage + sbt coverageReport +test-coverage: clean + -$(MAKE) test MEASURE_COVERAGE=1 +coveralls: coverage-report + # submit coverage data to https://coveralls.io/r/HazyResearch/ddlog + # (Make sure you have set COVERALLS_REPO_TOKEN=...) + sbt coveralls # test standalone package .PHONY: test-package @@ -24,6 +40,8 @@ TEST_CLASS_OR_JAR=$(realpath $(JAR)) \ test/test.sh # build standalone jar +.PHONY: package +package: $(JAR) $(JAR): $(wildcard $(SOURCE_DIR)/*.scala) sbt clean assembly ln -sfn $$(ls -t target/scala-*/*-assembly-*.jar | head -1) $@ diff --git a/README.md b/README.md index d2b647407..af122c6bd 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,10 @@ -DeepDiveLogCompiler [![Build Status](https://travis-ci.org/HazyResearch/ddlog.svg)](https://travis-ci.org/HazyResearch/ddlog) +DeepDiveLog [![Build Status](https://travis-ci.org/HazyResearch/ddlog.svg)](https://travis-ci.org/HazyResearch/ddlog) [![Coverage Status](https://coveralls.io/repos/HazyResearch/ddlog/badge.svg)](https://coveralls.io/r/HazyResearch/ddlog) =================== -A compiler that enables writing DeepDive apps in a Datalog-like syntax. - -## Testing - -```bash -make -``` +A Datalog-like language for writing DeepDive apps. 
## Building -The following command produces a standalone jar. +The following command produces a standalone jar that contains the compiler. ```bash make ddlog.jar @@ -18,8 +12,23 @@ make ddlog.jar ## Running The following will generate an application.conf for the [spouse example in DeepDive's tutorial](http://deepdive.stanford.edu/doc/basics/walkthrough/walkthrough.html). + ```bash mkdir -p examples/spouse_example java -jar ddlog.jar compile examples/spouse_example.ddl >examples/spouse_example/application.conf ``` + +## Testing +The following command runs all tests under test/. + +```bash +make # or make test +``` + +## Coverage +The following command produces a test coverage report. + +```bash +make coverage-report +``` diff --git a/project/build.properties b/project/build.properties new file mode 100644 index 000000000..748703f77 --- /dev/null +++ b/project/build.properties @@ -0,0 +1 @@ +sbt.version=0.13.7 diff --git a/project/build.sbt b/project/build.sbt new file mode 100644 index 000000000..20c696dc3 --- /dev/null +++ b/project/build.sbt @@ -0,0 +1,5 @@ +resolvers += Classpaths.sbtPluginReleases + +addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.0.4") // for sbt-0.13.5 or higher + +addSbtPlugin("org.scoverage" % "sbt-coveralls" % "1.0.0") // for sbt-0.13.5 or higher From 7631917d45c1e62bfcf461b06d8dea5d2253301a Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 07:15:13 -0700 Subject: [PATCH 103/347] Fixes TravisCI to declare language --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 881c92d55..9782dd099 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,4 @@ +language: scala scala: - "2.11.1" before_install: From e45ca3adc598ee83322ed823a9818df7b4fa4e6f Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 07:22:32 -0700 Subject: [PATCH 104/347] Fixes Travis builds to measure coverage --- .travis.yml | 6 +++--- Makefile | 7 +++---- README.md | 2 +- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index 9782dd099..db7cc0cee 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,8 +2,8 @@ language: scala scala: - "2.11.1" before_install: - - sudo apt-get install -qq make wdiff + - sudo apt-get install -qq scala make wdiff script: - - make + - make test MEASURE_COVERAGE=true after_success: - - sbt coverageReport coveralls + - sbt coveralls diff --git a/Makefile b/Makefile index dd6d958d4..44e19fd75 100644 --- a/Makefile +++ b/Makefile @@ -22,12 +22,11 @@ else endif # test coverage report from a clean build -.PHONY: coverage-report test-coverage -coverage-report: test-coverage - sbt coverageReport +.PHONY: test-coverage coveralls test-coverage: clean -$(MAKE) test MEASURE_COVERAGE=1 -coveralls: coverage-report + sbt coverageReport +coveralls: test-coverage # submit coverage data to https://coveralls.io/r/HazyResearch/ddlog # (Make sure you have set COVERALLS_REPO_TOKEN=...) sbt coveralls diff --git a/README.md b/README.md index af122c6bd..78933f57c 100644 --- a/README.md +++ b/README.md @@ -30,5 +30,5 @@ make # or make test The following command produces a test coverage report. 
```bash -make coverage-report +make test-coverage ``` From d162ee44e60493ddc80bee65d2f5d7421157f986 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 07:43:59 -0700 Subject: [PATCH 105/347] Fixes Makefile to play nice with Travis by not using scala command --- .travis.yml | 2 +- Makefile | 4 ++-- test/expected-output-test.bats.template | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index db7cc0cee..eacfee640 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,7 @@ language: scala scala: - "2.11.1" before_install: - - sudo apt-get install -qq scala make wdiff + - sudo apt-get install -qq make wdiff script: - make test MEASURE_COVERAGE=true after_success: diff --git a/Makefile b/Makefile index 44e19fd75..355386c94 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ SOURCE_DIR = src/main/scala/org/deepdive/ddlog TARGET_DIR = target/scala-2.10/classes -TEST_CLASSPATH_CACHE = $(TARGET_DIR)/dependency-classpath +TEST_CLASSPATH_CACHE = $(TARGET_DIR)/../dependency-classpath JAR = ddlog.jar # test @@ -35,7 +35,7 @@ coveralls: test-coverage .PHONY: test-package test-package: $(JAR) CLASSPATH= \ -TEST_CLASS_OR_JAR=$(realpath $(JAR)) \ +TEST_CLASS_OR_JAR="-jar $(realpath $(JAR))" \ test/test.sh # build standalone jar diff --git a/test/expected-output-test.bats.template b/test/expected-output-test.bats.template index acc0320df..174f3a907 100644 --- a/test/expected-output-test.bats.template +++ b/test/expected-output-test.bats.template @@ -20,7 +20,7 @@ it="${TEST%.bats}:" # how to invoke ddlog compiler ddlog() { - scala "$TEST_CLASS_OR_JAR" "$@" + java $TEST_CLASS_OR_JAR "$@" } # how to diff (prefer wdiff) if type wdiff &>/dev/null; then From cea1156554066f593eb43102f71789faed3f955e Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 07:52:56 -0700 Subject: [PATCH 106/347] Forces bats --pretty on Travis --- test/test.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/test.sh b/test/test.sh index 408322d64..36641c621 100755 --- a/test/test.sh +++ b/test/test.sh @@ -22,5 +22,8 @@ for t in *.bats.template; do done done +# TravisCI supports colorful output +! ${TRAVIS:-false} || set -- --pretty "$@" + # run all .bats tests -bats *.bats */*.bats +bats "$@" *.bats */*.bats From a01370e2c7bd88c659dff572914aee5c8c3fa1ee Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 07:55:12 -0700 Subject: [PATCH 107/347] Polishes wdiff output for Travis as well --- test/expected-output-test.bats.template | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/test/expected-output-test.bats.template b/test/expected-output-test.bats.template index 174f3a907..cad5ac21b 100644 --- a/test/expected-output-test.bats.template +++ b/test/expected-output-test.bats.template @@ -9,7 +9,7 @@ # input.ddl. 
# required variables -: ${TEST_CLASS_OR_JAR:?class name or path to the ddlog.jar to test} +: ${TEST_CLASS_OR_JAR:?class name or -jar with the path to the ddlog.jar to test} # some shorthands cd "$BATS_TEST_DIRNAME" @@ -24,13 +24,13 @@ ddlog() { } # how to diff (prefer wdiff) if type wdiff &>/dev/null; then - diff() { - wdiff --terminal --statistics -- "$@" - } + if ${TRAVIS:-false}; then + diff() { wdiff --statistics -- "$@"; } + else + diff() { wdiff --terminal --statistics -- "$@"; } + fi else - diff() { - command diff --unified --ignore-all-space -- "$@" - } + diff() { command diff --unified --ignore-all-space -- "$@"; } fi # some preconditions From 46673c9f96963b238d73bd1d237a415ba4248a80 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 08:08:40 -0700 Subject: [PATCH 108/347] Polishes the tests for readability from Travis Bats' special characters actually aren't shown correctly. --- .travis.yml | 2 +- test/expected-output-test.bats.template | 1 - test/test.sh | 3 --- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index eacfee640..f955b0537 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,7 @@ language: scala scala: - "2.11.1" before_install: - - sudo apt-get install -qq make wdiff + - sudo apt-get install -qq wdiff script: - make test MEASURE_COVERAGE=true after_success: diff --git a/test/expected-output-test.bats.template b/test/expected-output-test.bats.template index cad5ac21b..c61d7ded7 100644 --- a/test/expected-output-test.bats.template +++ b/test/expected-output-test.bats.template @@ -12,7 +12,6 @@ : ${TEST_CLASS_OR_JAR:?class name or -jar with the path to the ddlog.jar to test} # some shorthands -cd "$BATS_TEST_DIRNAME" TESTDIR=${BATS_TEST_FILENAME%.bats} TESTDIR=${TESTDIR#$PWD/} TEST=${BATS_TEST_FILENAME#$PWD/} diff --git a/test/test.sh b/test/test.sh index 36641c621..a0317ea01 100755 --- a/test/test.sh +++ b/test/test.sh @@ -22,8 +22,5 @@ for t in *.bats.template; do done done -# TravisCI supports colorful output -! 
${TRAVIS:-false} || set -- --pretty "$@" - # run all .bats tests bats "$@" *.bats */*.bats From 61e9d26f2b920a25abd9fafef29da19f40cf2acc Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 08:12:39 -0700 Subject: [PATCH 109/347] Turns Bats into a submodule --- .gitignore | 1 - .gitmodules | 3 +++ test/bats | 1 + test/test.sh | 2 -- 4 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 .gitmodules create mode 160000 test/bats diff --git a/.gitignore b/.gitignore index 2fb25d6c0..60477fe8b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,5 @@ /ddlog.jar target -/test/bats /test/expected-output-test/**/*.bats /test/expected-output-test/**/*.actual diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..db077d414 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "test/bats"] + path = test/bats + url = https://github.com/sstephenson/bats.git diff --git a/test/bats b/test/bats new file mode 160000 index 000000000..955309ab9 --- /dev/null +++ b/test/bats @@ -0,0 +1 @@ +Subproject commit 955309ab943ea157ded0c402df98b160bb45ff92 diff --git a/test/test.sh b/test/test.sh index a0317ea01..cf48bf7dc 100755 --- a/test/test.sh +++ b/test/test.sh @@ -5,8 +5,6 @@ cd "$(dirname "$0")" # make sure bats is available PATH="$PWD/bats/bin:$PATH" -type bats &>/dev/null || - git clone https://github.com/sstephenson/bats.git # instantiate bats tests templates under its directory for t in *.bats.template; do From 9745165c47e8c6eaf54aebf7ac410e0ff80441f7 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 08:22:08 -0700 Subject: [PATCH 110/347] Polishes wdiff error: .actual instead of (null) --- test/expected-output-test.bats.template | 30 ++++++++++++------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/test/expected-output-test.bats.template b/test/expected-output-test.bats.template index c61d7ded7..bc6a26573 100644 --- a/test/expected-output-test.bats.template +++ b/test/expected-output-test.bats.template @@ -46,29 +46,29 @@ setup() { # check if print is idempotent @test "$it parses and prints what it prints (idempotent)" { - printed=$TESTDIR/print-idempotent.actual + printed=$TESTDIR/printed.actual ddlog print "$TESTDIR"/input.ddl >"$printed" || skip - ddlog print "$printed" | - diff "$printed" - + ddlog print "$printed" >"$TESTDIR"/printed-printed.actual + diff "$printed" "$TESTDIR"/printed-printed.actual } # compare the compiled output with what's expected @test "$it compiles input as expected" { expectedOutput=$TESTDIR/compile.expected + actualOutput=${expectedOutput%.expected}.actual [ -e "$expectedOutput" ] || skip - ddlog compile "$TESTDIR"/input.ddl | - tee "${expectedOutput%.expected}.actual" | - diff "$expectedOutput" - + ddlog compile "$TESTDIR"/input.ddl >"$actualOutput" + diff "$expectedOutput" "$actualOutput" } # compare the pretty-printed output with what's expected @test "$it prints input as expected" { expectedOutput=$TESTDIR/print.expected + actualOutput=${expectedOutput%.expected}.actual [ -e "$expectedOutput" ] || skip - ddlog print "$TESTDIR"/input.ddl | - tee "${expectedOutput%.expected}.actual" | - diff "$expectedOutput" - + ddlog print "$TESTDIR"/input.ddl >"$actualOutput" + diff "$expectedOutput" "$actualOutput" } @@ -77,17 +77,17 @@ setup() { # compare the compiled output of the incremental version with what's expected @test "$it compiles --incremental input as expected" { expectedOutput=$TESTDIR/compile-incremental.expected + actualOutput=${expectedOutput%.expected}.actual [ -e 
"$expectedOutput" ] || skip - ddlog compile --incremental "$TESTDIR"/input.ddl | - tee "${expectedOutput%.expected}.actual" | - diff "$expectedOutput" - + ddlog compile --incremental "$TESTDIR"/input.ddl >"$actualOutput" + diff "$expectedOutput" "$actualOutput" } # compare the pretty-printed output of the incremental version with what's expected @test "$it prints --incremental input as expected" { expectedOutput=$TESTDIR/print-incremental.expected + actualOutput=${expectedOutput%.expected}.actual [ -e "$expectedOutput" ] || skip - ddlog print --incremental "$TESTDIR"/input.ddl | - tee "${expectedOutput%.expected}.actual" | - diff "$expectedOutput" - + ddlog print --incremental "$TESTDIR"/input.ddl >"$actualOutput" + diff "$expectedOutput" "$actualOutput" } From 2fd2c978707ae5bc97854ecdef57b6dc068afa37 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 15:24:57 -0700 Subject: [PATCH 111/347] Adds tests for confirming parse errors --- .gitignore | 4 +- test/bats-template.bash | 27 ++++++++++++ test/expected-output-test.bats.template | 43 ++++++------------- test/expected-output-test/comments/input.ddl | 2 +- test/parse-error-test.bats.template | 21 +++++++++ .../nested_multiline_comments/input.ddl | 1 + .../parse-error.expected | 4 ++ 7 files changed, 69 insertions(+), 33 deletions(-) create mode 100644 test/bats-template.bash create mode 100644 test/parse-error-test.bats.template create mode 100644 test/parse-error-test/nested_multiline_comments/input.ddl create mode 100644 test/parse-error-test/nested_multiline_comments/parse-error.expected diff --git a/.gitignore b/.gitignore index 60477fe8b..61245514c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,5 @@ /ddlog.jar target -/test/expected-output-test/**/*.bats -/test/expected-output-test/**/*.actual +/test/*-test/**/*.bats +/test/*-test/**/*.actual diff --git a/test/bats-template.bash b/test/bats-template.bash new file mode 100644 index 000000000..319ed5004 --- /dev/null +++ b/test/bats-template.bash @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# Utilities for testing with Bats + +# required variables +: ${TEST_CLASS_OR_JAR:?class name or -jar with the path to the ddlog.jar to test} + +# some shorthands +TESTDIR=${BATS_TEST_FILENAME%.bats} +TESTDIR=${TESTDIR#$PWD/} +TEST=${BATS_TEST_FILENAME#$PWD/} +it="${TEST%.bats}:" + +# how to invoke ddlog compiler +ddlog() { + java $TEST_CLASS_OR_JAR "$@" +} +# how to diff (prefer wdiff) +if type wdiff &>/dev/null; then + if ${TRAVIS:-false}; then + diff() { wdiff --statistics -- "$@"; } + else + diff() { wdiff --terminal --statistics -- "$@"; } + fi +else + diff() { command diff --unified --ignore-all-space -- "$@"; } +fi + diff --git a/test/expected-output-test.bats.template b/test/expected-output-test.bats.template index bc6a26573..59be8b438 100644 --- a/test/expected-output-test.bats.template +++ b/test/expected-output-test.bats.template @@ -1,36 +1,10 @@ #!/usr/bin/env bats -# Expected output test with Bats -# +# Expected output tests +# # These test cases compares outputs of various modes of ddlog against a .ddl example file with its expected output. # Tests are skipped if no expected output is found. -# -# This .bats.template file should be run by creating a .bats symlink to it -# next to a directory with the same name (without .bats) that contains -# input.ddl. 
-# required variables -: ${TEST_CLASS_OR_JAR:?class name or -jar with the path to the ddlog.jar to test} - -# some shorthands -TESTDIR=${BATS_TEST_FILENAME%.bats} -TESTDIR=${TESTDIR#$PWD/} -TEST=${BATS_TEST_FILENAME#$PWD/} -it="${TEST%.bats}:" - -# how to invoke ddlog compiler -ddlog() { - java $TEST_CLASS_OR_JAR "$@" -} -# how to diff (prefer wdiff) -if type wdiff &>/dev/null; then - if ${TRAVIS:-false}; then - diff() { wdiff --statistics -- "$@"; } - else - diff() { wdiff --terminal --statistics -- "$@"; } - fi -else - diff() { command diff --unified --ignore-all-space -- "$@"; } -fi +source bats-template.bash # for $TESTDIR, $it, etc. # some preconditions setup() { @@ -41,7 +15,16 @@ setup() { # check if example can be parsed @test "$it parses input" { - ddlog print "$TESTDIR"/input.ddl >/dev/null + expectedError="$TESTDIR"/parse-error.expected + if [ -e "$expectedError" ]; then + # input is expected to have parse errors + actualError=${expectedError%.expected}.actual + ! ddlog print "$TESTDIR"/input.ddl >/dev/null 2>"$actualError" + diff "$expectedError" "$actualError" + else + # input is supposed to parse without errors + ddlog print "$TESTDIR"/input.ddl >/dev/null + fi } # check if print is idempotent diff --git a/test/expected-output-test/comments/input.ddl b/test/expected-output-test/comments/input.ddl index 147644b75..f34c68b9d 100644 --- a/test/expected-output-test/comments/input.ddl +++ b/test/expected-output-test/comments/input.ddl @@ -13,4 +13,4 @@ R(x) :- S(x, y, z). Multi -line comments - /* unfortunately cannot be nested */ +*/ diff --git a/test/parse-error-test.bats.template b/test/parse-error-test.bats.template new file mode 100644 index 000000000..b15cc80d5 --- /dev/null +++ b/test/parse-error-test.bats.template @@ -0,0 +1,21 @@ +#!/usr/bin/env bats +# Parse error test +# +# The test case here feeds a malformed .ddl into ddlog's print command and compares whether it produces an expected error. + +source bats-template.bash # for $TESTDIR, $it, etc. + +# some preconditions +setup() { + [ -e "$TESTDIR" ] + expectedError="$TESTDIR"/parse-error.expected + [ -e "$expectedError" ] + actualError=${expectedError%.expected}.actual +} + +# check if input produces a parse error +@test "$it parses input" { + ! ddlog print "$TESTDIR"/input.ddl >/dev/null 2>"$actualError" + diff "$expectedError" "$actualError" +} + diff --git a/test/parse-error-test/nested_multiline_comments/input.ddl b/test/parse-error-test/nested_multiline_comments/input.ddl new file mode 100644 index 000000000..82e085022 --- /dev/null +++ b/test/parse-error-test/nested_multiline_comments/input.ddl @@ -0,0 +1 @@ +/* Multi-line comments /* unfortunately cannot be nested */ */ diff --git a/test/parse-error-test/nested_multiline_comments/parse-error.expected b/test/parse-error-test/nested_multiline_comments/parse-error.expected new file mode 100644 index 000000000..366b77ddc --- /dev/null +++ b/test/parse-error-test/nested_multiline_comments/parse-error.expected @@ -0,0 +1,4 @@ +[error] parse-error-test/nested_multiline_comments/input.ddl[1.61] failure: string matching regex `\p{javaJavaIdentifierStart}\p{javaJavaIdentifierPart}*' expected but `*' found + +/* Multi-line comments /* unfortunately cannot be nested */ */ + ^ From 2c4a6eddd3404856a821d8fbddf1cf122667f042 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Sat, 23 May 2015 15:52:49 -0700 Subject: [PATCH 112/347] Makes `make test-coverage` incremental by avoiding clean It turns out `sbt coverage compile` takes care of rebuilding the instrumented .classes. 
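The resulting coverage workflow, in sketch (assuming the scoverage and coveralls sbt plugins wired up in `project/build.sbt` earlier in this series):

```bash
make test MEASURE_COVERAGE=true   # compiles via `sbt coverage compile`, then runs the Bats tests
sbt coverageReport                # aggregates the recorded scoverage-data into a report
```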
--- Makefile | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 355386c94..efb9b5f84 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,7 @@ SOURCE_DIR = src/main/scala/org/deepdive/ddlog TARGET_DIR = target/scala-2.10/classes +COVERAGE_DIR = $(TARGET_DIR)/../scoverage-data TEST_CLASSPATH_CACHE = $(TARGET_DIR)/../dependency-classpath JAR = ddlog.jar @@ -13,18 +14,22 @@ TEST_CLASS_OR_JAR=org.deepdive.ddlog.DeepDiveLog \ test/test.sh $(TEST_CLASSPATH_CACHE): build.sbt $(wildcard project/*.sbt) sbt "export compile:dependency-classpath" | tail -1 >$@ -$(TARGET_DIR): $(wildcard $(SOURCE_DIR)/*.scala) + +SOURCES = $(wildcard $(SOURCE_DIR)/*.scala) ifndef MEASURE_COVERAGE +$(TARGET_DIR): $(SOURCES) sbt compile else +$(TARGET_DIR): $(COVERAGE_DIR) +$(COVERAGE_DIR): $(SOURCES) # enabling coverage measurement sbt coverage compile endif -# test coverage report from a clean build +# test coverage report .PHONY: test-coverage coveralls -test-coverage: clean - -$(MAKE) test MEASURE_COVERAGE=1 +test-coverage: + -$(MAKE) test MEASURE_COVERAGE=true sbt coverageReport coveralls: test-coverage # submit coverage data to https://coveralls.io/r/HazyResearch/ddlog From 0d58a32d7630b32df7938af15452a785580edfe7 Mon Sep 17 00:00:00 2001 From: senwu Date: Tue, 26 May 2015 21:57:25 -0700 Subject: [PATCH 113/347] fix bugs in test --- .../ddlog/DeepDiveLogDeltaDeriver.scala | 4 +- .../many_joins/print-incremental.expected | 43 ++++++++++++------- 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index fda08cc32..b4ac0c56c 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -96,8 +96,8 @@ object DeepDiveLogDeltaDeriver{ ) if (!stmt.isQuery) incNewStmt = incNewStmt.copy( a = incNewStmt.a.copy( - terms = stmt.a.terms :+ Variable("dd_count", newPrefix + stmt.a.name, stmt.a.terms.length), - types = stmt.a.types :+ "int" + terms = incNewStmt.a.terms :+ Variable("dd_count", newPrefix + stmt.a.name, stmt.a.terms.length), + types = incNewStmt.a.types :+ "int" ) ) incrementalStatement += incNewStmt diff --git a/test/expected-output-test/many_joins/print-incremental.expected b/test/expected-output-test/many_joins/print-incremental.expected index c0e725b31..745d8e067 100644 --- a/test/expected-output-test/many_joins/print-incremental.expected +++ b/test/expected-output-test/many_joins/print-incremental.expected @@ -2,41 +2,52 @@ R?(x text). dd_delta_R?(x text). -S(x text). +dd_new_R?(x text). -dd_delta_S(x text). +S(x text, + dd_count int). -dd_new_S(x text). +dd_delta_S(x text, + dd_count int). -dd_new_S(x) :- - S(x). +dd_new_S(x text, + dd_count int). -dd_new_S(x) :- - dd_delta_S(x). +dd_new_S(x, dd_count) :- + S(x, dd_count); + dd_delta_S(x, dd_count). T(x text, - f text). + f text, + dd_count int). dd_delta_T(x text, - f text). + f text, + dd_count int). dd_new_T(x text, - f text). + f text, + dd_count int). dd_new_T(x, f, dd_count) :- T(x, f, dd_count); dd_delta_T(x, f, dd_count). U(x text, - l text). + l text, + dd_count int). dd_delta_U(x text, - l text). + l text, + dd_count int). -R(x) :- - S(x), - T(x, f), - U(x, l). +dd_new_U(x text, + l text, + dd_count int). + +dd_new_U(x, l, dd_count) :- + U(x, l, dd_count); + dd_delta_U(x, l, dd_count). 
dd_delta_R(x) :-
    dd_delta_S(x),

From 7f6a09ebfda5bf55ae409b6d1e48b5a0c619339c Mon Sep 17 00:00:00 2001
From: senwu
Date: Tue, 26 May 2015 23:45:12 -0700
Subject: [PATCH 114/347] support pipelines:
 1. Add three pipelines: initdb, extraction, inference
 2. Create tables for all schema declarations
 3. For extraction rules, use TRUNCATE/INSERT INTO when the head relation is
    declared in the schema; otherwise use DROP VIEW IF EXISTS/CREATE VIEW
 4. Change the inference rule index to the global statement index for
    creating the inference pipeline

---
 examples/spouse_example.ddl                   |  20 --
 .../deepdive/ddlog/DeepDiveLogCompiler.scala  | 100 ++++--
 .../compile-incremental.expected              | 315 ++++++++++++++----
 .../spouse_example/compile.expected           |  97 +++++-
 .../spouse_example/print-incremental.expected |  72 ----
 .../spouse_example/print.expected             |  17 -
 6 files changed, 404 insertions(+), 217 deletions(-)

diff --git a/examples/spouse_example.ddl b/examples/spouse_example.ddl
index 1e910528d..1e3e0304b 100644
--- a/examples/spouse_example.ddl
+++ b/examples/spouse_example.ddl
@@ -37,11 +37,6 @@ has_spouse?(relation_id text).
 people_mentions :-
   !ext_people(ext_people_input).
 
-ext_people_input(
-  sentence_id text,
-  words text[],
-  ner_tags text[]).
-
 ext_people_input(s, words, ner_tags) :-
   sentences(a, b, words, c, d, e, ner_tags, f, s).
 
@@ -52,13 +47,6 @@ function ext_people over like ext_people_input
 has_spouse_candidates :-
   !ext_has_spouse(ext_has_spouse_input).
 
-ext_has_spouse_input(
-  sentence_id text,
-  p1_id text,
-  p1_text text,
-  p2_id text,
-  p2_text text).
-
 ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :-
   people_mentions(s, a, b, p1_text, p1_id),
   people_mentions(s, c, d, p2_text, p2_id).
@@ -70,14 +58,6 @@ function ext_has_spouse over like ext_has_spouse_input
 has_spouse_features :-
   !ext_has_spouse_features(ext_has_spouse_features_input).
 
-ext_has_spouse_features_input(
-  words text[],
-  relation_id text,
-  p1_start_position int,
-  p1_length int,
-  p2_start_position int,
-  p2_length int).
-
 ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :-
   sentences(a, b, words, c, d, e, f, g, s),
   has_spouse_candidates(person1_id, person2_id, s, h, rid, x),
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index f8cab1cbd..4d63f391e 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -96,11 +96,12 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
   var isIncremental : Boolean = false
 
   // Mapping head names to the actual statements
+  var schemaDeclarationGroupByHead : Map[String, List[SchemaDeclaration]] = new HashMap[String, List[SchemaDeclaration]]()
   var extractionRuleGroupByHead : Map[String, List[ExtractionRule]] = new HashMap[String, List[ExtractionRule]]()
   var inferenceRuleGroupByHead : Map[String, List[InferenceRule]] = new HashMap[String, List[InferenceRule]]()
   var functionCallRuleGroupByInput : Map[String, List[FunctionCallRule]] = new HashMap[String, List[FunctionCallRule]]()
   var functionCallRuleGroupByOutput : Map[String, List[FunctionCallRule]] = new HashMap[String, List[FunctionCallRule]]()
-  
+
   def init() = {
     // generate the statements.
     isIncremental = config.isIncremental
@@ -130,12 +131,18 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
   // Given a statement, resolve its name for the compiled extractor block.
def resolveExtractorBlockName(s: Statement): String = { s match { - case s: FunctionCallRule => s"extraction_rule_${statements indexOf s}" - case s: ExtractionRule => s"extraction_rule_${statements indexOf s}" - case s: InferenceRule => s"extraction_rule_${s.q.head.name}" + case s: SchemaDeclaration => s"extraction_rule_${statements indexOf s}" + case s: FunctionCallRule => s"extraction_rule_${statements indexOf s}" + case s: ExtractionRule => s"extraction_rule_${statements indexOf s}" + case s: InferenceRule => s"extraction_rule_${s.q.head.name}" } } + // Given an inference rule, resolve its name for the compiled inference block. + def resolveInferenceBlockName(s: InferenceRule): String = { + s"factor_${s.q.head.name}_${statements indexOf s}" + } + // Given a variable, resolve it. TODO: This should give a warning, // if we encouter a variable that is not in this map, then something // odd has happened. @@ -203,20 +210,24 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // Group statements by head def groupByHead(statements: List[Statement]) = { // Compile compilation states by head name based on type - val extractionRuleToCompile = new ListBuffer[ExtractionRule]() - val inferenceRuleToCompile = new ListBuffer[InferenceRule]() - val functionCallRuleToCompile = new ListBuffer[FunctionCallRule]() + val schemaDeclarationToCompile = new ListBuffer[SchemaDeclaration]() + val extractionRuleToCompile = new ListBuffer[ExtractionRule]() + val inferenceRuleToCompile = new ListBuffer[InferenceRule]() + val functionCallRuleToCompile = new ListBuffer[FunctionCallRule]() + statements foreach (_ match { - case s: ExtractionRule => extractionRuleToCompile += s - case s: FunctionCallRule => functionCallRuleToCompile += s - case s: InferenceRule => inferenceRuleToCompile += s - case _ => + case s: SchemaDeclaration => schemaDeclarationToCompile += s + case s: ExtractionRule => extractionRuleToCompile += s + case s: FunctionCallRule => functionCallRuleToCompile += s + case s: InferenceRule => inferenceRuleToCompile += s + case _ => }) - extractionRuleGroupByHead = extractionRuleToCompile.toList.groupBy(_.q.head.name) - inferenceRuleGroupByHead = inferenceRuleToCompile.toList.groupBy(_.q.head.name) - functionCallRuleGroupByInput = functionCallRuleToCompile.toList.groupBy(_.input) - functionCallRuleGroupByOutput = functionCallRuleToCompile.toList.groupBy(_.output) + schemaDeclarationGroupByHead = schemaDeclarationToCompile.toList.groupBy(_.a.name) + extractionRuleGroupByHead = extractionRuleToCompile.toList.groupBy(_.q.head.name) + inferenceRuleGroupByHead = inferenceRuleToCompile.toList.groupBy(_.q.head.name) + functionCallRuleGroupByInput = functionCallRuleToCompile.toList.groupBy(_.input) + functionCallRuleGroupByOutput = functionCallRuleToCompile.toList.groupBy(_.output) } // Analyze the block visibility among statements @@ -285,6 +296,27 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { type CompiledBlock = String type CompiledBlocks = List[CompiledBlock] + // Generate schema for database + def compileSchemaDeclarations(stmts: List[SchemaDeclaration], ss: CompilationState): CompiledBlocks = { + var schemas = new ListBuffer[String]() + for (stmt <- stmts) { + val columnDecls = stmt.a.terms map { + case Variable(name, _, i) => s"${name} ${stmt.a.types(i)}" + } + val indentation = " " * stmt.a.name.length + val blockName = ss.resolveExtractorBlockName(stmt) + schemas += s""" + deepdive.extraction.extractors.${blockName} { + sql: \"\"\" DROP TABLE IF EXISTS 
${stmt.a.name} CASCADE; + CREATE TABLE + ${stmt.a.name}(${columnDecls.mkString(",\n" + indentation)}) + \"\"\" + style: "sql_extractor" + }""" + } + schemas.toList + } + // Generate extraction rule part for deepdive def compileExtractionRules(stmts: List[ExtractionRule], ss: CompilationState): CompiledBlocks = { var inputQueries = new ListBuffer[String]() @@ -316,10 +348,13 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } } val blockName = ss.resolveExtractorBlockName(stmts(0)) + val sqlCmdForCleanUp = if (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name) "TRUNCATE" else "DROP VIEW IF EXISTS" + val sqlCmdForInsert = if (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name) "INSERT INTO" else "CREATE VIEW" + val useAS = if (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name) "" else " AS" val extractor = s""" deepdive.extraction.extractors.${blockName} { - sql: \"\"\" DROP VIEW IF EXISTS ${stmts(0).q.head.name}; - CREATE VIEW ${stmts(0).q.head.name} AS ${inputQueries.mkString(" UNION ")} + sql: \"\"\" ${sqlCmdForCleanUp} ${stmts(0).q.head.name}; + ${sqlCmdForInsert} ${stmts(0).q.head.name}${useAS} ${inputQueries.mkString(" UNION ")} \"\"\" style: "sql_extractor" ${ss.generateDependenciesOfCompiledBlockFor(stmts)} @@ -388,10 +423,13 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } } val blockName = ss.resolveExtractorBlockName(zs(0)) + val sqlCmdForCleanUp = if (ss.schemaDeclarationGroupByHead contains zs(0).q.head.name) "TRUNCATE" else "DROP VIEW IF EXISTS" + val sqlCmdForInsert = if (ss.schemaDeclarationGroupByHead contains zs(0).q.head.name) "INSERT INTO" else "CREATE VIEW" + val useAS = if (ss.schemaDeclarationGroupByHead contains zs(0).q.head.name) "" else " AS" val ext = s""" deepdive.extraction.extractors.${blockName} { - sql: \"\"\" DROP TABLE IF EXISTS ${zs(0).q.head.name}; - CREATE TABLE ${zs(0).q.head.name} AS + sql: \"\"\" ${sqlCmdForCleanUp} ${zs(0).q.head.name}; + ${sqlCmdForInsert} ${zs(0).q.head.name}${useAS} ${inputQueries.mkString(" UNION ")} \"\"\" style: "sql_extractor" @@ -403,8 +441,6 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { if (ss.isQueryTerm(stmts(0).q.head.name)) blocks :::= compileNodeRule(stmts, ss) - val inferenceRuleGroupByHead = stmts.groupBy(_.q.head.name) - var stmtIndex = 0 for (stmt <- stmts) { var inputQueries = new ListBuffer[String]() var func = "" @@ -445,14 +481,14 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } } } + val blockName = ss.resolveInferenceBlockName(stmt) blocks ::= s""" - deepdive.inference.factors.factor_${stmt.q.head.name}_${stmtIndex} { + deepdive.inference.factors.${blockName} { input_query: \"\"\"${inputQueries.mkString(" UNION ")}\"\"\" function: "${func}" weight: "${weight}" } """ - stmtIndex += 1 } blocks.reverse } @@ -473,6 +509,18 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { """) } + def compilePipelines(ss: CompilationState): CompiledBlocks = { + val run = "deepdive.pipeline.run: ${PIPELINE}" + val setup_database_pipeline = ((ss.schemaDeclarationGroupByHead map (_._2)).flatten map {s => ss.resolveExtractorBlockName(s)}).mkString(", ") + val initdb = s"deepdive.pipeline.pipelines.initdb: [${setup_database_pipeline}]" + val extraction = (ss.visible map {s => ss.resolveExtractorBlockName(s)}).mkString(", ") + val extraction_pipeline = s"deepdive.pipeline.pipelines.extraction: [${extraction}]" + val inference = ((ss.inferenceRuleGroupByHead map (_._2)).flatten map {s => ss.resolveInferenceBlockName(s)}).mkString(", ") + val 
inference_pipeline = s"deepdive.pipeline.pipelines.inference: [${inference}]" + + List(run, initdb, extraction_pipeline, inference_pipeline) + } + // generate variable schema statements def compileVariableSchema(statements: DeepDiveLog.Program, ss: CompilationState): CompiledBlocks = { var schema = Set[String]() @@ -498,11 +546,11 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { // derive and compile the program with delta rules instead for incremental version if (config.isIncremental) DeepDiveLogDeltaDeriver.derive(parsedProgram) else parsedProgram - // take an initial pass to analyze the parsed program val state = new CompilationState( programToCompile, config ) val body = new ListBuffer[String]() + body ++= compileSchemaDeclarations((state.schemaDeclarationGroupByHead map (_._2)).flatten.toList, state) state.extractionRuleGroupByHead foreach {keyVal => body ++= compileExtractionRules(keyVal._2, state)} state.functionCallRuleGroupByInput foreach {keyVal => body ++= compileFunctionCallRules(keyVal._2, state)} state.inferenceRuleGroupByHead foreach {keyVal => body ++= compileInferenceRules(keyVal._2, state)} @@ -513,7 +561,9 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { ::: compileVariableSchema(programToCompile, state) ::: - body.toList + body.toList + ::: + compilePipelines(state) ) // emit the generated code diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected index 415587dc8..0ace5843d 100644 --- a/test/expected-output-test/spouse_example/compile-incremental.expected +++ b/test/expected-output-test/spouse_example/compile-incremental.expected @@ -15,7 +15,223 @@ } - deepdive.extraction.extractors.extraction_rule_42 { + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS sentences CASCADE; + CREATE TABLE + sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; + CREATE TABLE + dd_delta_articles(article_id text, + text text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_12 { + sql: """ DROP TABLE IF EXISTS has_spouse_candidates CASCADE; + CREATE TABLE + has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_10 { + sql: """ DROP TABLE IF EXISTS dd_new_people_mentions CASCADE; + CREATE TABLE + dd_new_people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_20 { + sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; + CREATE TABLE + has_spouse(relation_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_17 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; + CREATE TABLE + dd_delta_has_spouse_features(relation_id text, + feature text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS dd_new_articles CASCADE; + CREATE TABLE + 
dd_new_articles(article_id text, + text text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS articles CASCADE; + CREATE TABLE + articles(article_id text, + text text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_14 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_candidates CASCADE; + CREATE TABLE + dd_new_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_18 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_features CASCADE; + CREATE TABLE + dd_new_has_spouse_features(relation_id text, + feature text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ DROP TABLE IF EXISTS dd_new_sentences CASCADE; + CREATE TABLE + dd_new_sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_9 { + sql: """ DROP TABLE IF EXISTS dd_delta_people_mentions CASCADE; + CREATE TABLE + dd_delta_people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_candidates CASCADE; + CREATE TABLE + dd_delta_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_16 { + sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE; + CREATE TABLE + has_spouse_features(relation_id text, + feature text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS dd_delta_sentences CASCADE; + CREATE TABLE + dd_delta_sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_8 { + sql: """ DROP TABLE IF EXISTS people_mentions CASCADE; + CREATE TABLE + people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_21 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE; + CREATE TABLE + dd_delta_has_spouse(relation_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_22 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse CASCADE; + CREATE TABLE + dd_new_has_spouse(relation_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_30 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS 
"people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" @@ -32,13 +248,13 @@ WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_23" , "extraction_rule_15" , "extraction_rule_7" , "extraction_rule_11" , "extraction_rule_30" ] + dependencies: [ "extraction_rule_23" , "extraction_rule_15" , "extraction_rule_26" , "extraction_rule_7" , "extraction_rule_11" ] } deepdive.extraction.extractors.extraction_rule_11 { - sql: """ DROP VIEW IF EXISTS dd_new_people_mentions; - CREATE VIEW dd_new_people_mentions AS + sql: """ TRUNCATE dd_new_people_mentions; + INSERT INTO dd_new_people_mentions SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count FROM people_mentions R0 UNION @@ -51,24 +267,9 @@ } - deepdive.extraction.extractors.extraction_rule_27 { - sql: """ DROP VIEW IF EXISTS dd_new_ext_people_input; - CREATE VIEW dd_new_ext_people_input AS - SELECT R0.sentence_id, R0.words, R0.ner_tags, R0.dd_count - FROM ext_people_input R0 - UNION - SELECT R0.sentence_id, R0.words, R0.ner_tags, R0.dd_count - FROM dd_delta_ext_people_input R0 - - """ - style: "sql_extractor" - dependencies: [ "extraction_rule_28" ] - } - - deepdive.extraction.extractors.extraction_rule_3 { - sql: """ DROP VIEW IF EXISTS dd_new_articles; - CREATE VIEW dd_new_articles AS + sql: """ TRUNCATE dd_new_articles; + INSERT INTO dd_new_articles SELECT R0.article_id, R0.text, R0.dd_count FROM articles R0 UNION @@ -81,7 +282,7 @@ } - deepdive.extraction.extractors.extraction_rule_28 { + deepdive.extraction.extractors.extraction_rule_24 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" @@ -94,8 +295,8 @@ deepdive.extraction.extractors.extraction_rule_15 { - sql: """ DROP VIEW IF EXISTS dd_new_has_spouse_candidates; - CREATE VIEW dd_new_has_spouse_candidates AS + sql: """ TRUNCATE dd_new_has_spouse_candidates; + INSERT INTO dd_new_has_spouse_candidates SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count FROM has_spouse_candidates R0 UNION @@ -104,43 +305,13 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_30" ] - } - - - deepdive.extraction.extractors.extraction_rule_41 { - sql: """ DROP VIEW IF EXISTS dd_new_ext_has_spouse_features_input; - CREATE VIEW dd_new_ext_has_spouse_features_input AS - SELECT R0.words, R0.relation_id, R0.p1_start_position, R0.p1_length, R0.p2_start_position, R0.p2_length, R0.dd_count - FROM ext_has_spouse_features_input R0 - UNION - SELECT R0.words, R0.relation_id, R0.p1_start_position, R0.p1_length, R0.p2_start_position, R0.p2_length, R0.dd_count - FROM dd_delta_ext_has_spouse_features_input R0 - - """ - style: "sql_extractor" - dependencies: [ "extraction_rule_42" ] - } - - - deepdive.extraction.extractors.extraction_rule_34 { - sql: """ DROP VIEW IF EXISTS dd_new_ext_has_spouse_input; - CREATE VIEW dd_new_ext_has_spouse_input AS - SELECT R0.sentence_id, R0.p1_id, R0.p1_text, R0.p2_id, R0.p2_text, 
R0.dd_count - FROM ext_has_spouse_input R0 - UNION - SELECT R0.sentence_id, R0.p1_id, R0.p1_text, R0.p2_id, R0.p2_text, R0.dd_count - FROM dd_delta_ext_has_spouse_input R0 - - """ - style: "sql_extractor" - dependencies: [ "extraction_rule_35" ] + dependencies: [ "extraction_rule_26" ] } deepdive.extraction.extractors.extraction_rule_19 { - sql: """ DROP VIEW IF EXISTS dd_new_has_spouse_features; - CREATE VIEW dd_new_has_spouse_features AS + sql: """ TRUNCATE dd_new_has_spouse_features; + INSERT INTO dd_new_has_spouse_features SELECT R0.relation_id, R0.feature, R0.dd_count FROM has_spouse_features R0 UNION @@ -149,13 +320,13 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_37" ] + dependencies: [ "extraction_rule_29" ] } deepdive.extraction.extractors.extraction_rule_7 { - sql: """ DROP VIEW IF EXISTS dd_new_sentences; - CREATE VIEW dd_new_sentences AS + sql: """ TRUNCATE dd_new_sentences; + INSERT INTO dd_new_sentences SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count FROM sentences R0 UNION @@ -168,7 +339,7 @@ } - deepdive.extraction.extractors.extraction_rule_35 { + deepdive.extraction.extractors.extraction_rule_27 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" @@ -189,33 +360,33 @@ output_relation: "dd_delta_people_mentions" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_28" ] + dependencies: [ "extraction_rule_24" ] } - deepdive.extraction.extractors.extraction_rule_37 { + deepdive.extraction.extractors.extraction_rule_29 { input: """ SELECT * FROM dd_delta_ext_has_spouse_features_input """ output_relation: "dd_delta_has_spouse_features" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_42" ] + dependencies: [ "extraction_rule_30" ] } - deepdive.extraction.extractors.extraction_rule_30 { + deepdive.extraction.extractors.extraction_rule_26 { input: """ SELECT * FROM dd_delta_ext_has_spouse_input """ output_relation: "dd_delta_has_spouse_candidates" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_35" ] + dependencies: [ "extraction_rule_27" ] } deepdive.extraction.extractors.extraction_rule_dd_delta_has_spouse { - sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse; - CREATE TABLE dd_delta_has_spouse AS + sql: """ TRUNCATE dd_delta_has_spouse; + INSERT INTO dd_delta_has_spouse SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count FROM dd_delta_has_spouse_candidates R0, has_spouse_features R1 WHERE R1.relation_id = R0.relation_id @@ -225,11 +396,11 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_30" , "extraction_rule_15" , "extraction_rule_37" ] + dependencies: [ "extraction_rule_26" , "extraction_rule_15" , "extraction_rule_29" ] } - deepdive.inference.factors.factor_dd_delta_has_spouse_0 { + deepdive.inference.factors.factor_dd_delta_has_spouse_32 { 
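A recurring change in this file is that DROP VIEW / CREATE VIEW gives way to TRUNCATE / INSERT INTO wherever the head relation is a declared table. Reduced to a sketch, with the decision passed in as a flag (the real compiler makes this choice by checking schemaDeclarationGroupByHead, as the compileNodeRule code removed later in this series shows):

    object SqlHeaderSketch extends App {
      // Declared relations are truncated and re-filled; purely derived
      // relations remain views that are dropped and recreated.
      def wrap(head: String, isDeclaredTable: Boolean, selectSql: String): String =
        if (isDeclaredTable)
          s"TRUNCATE $head;\nINSERT INTO $head\n$selectSql"
        else
          s"DROP VIEW IF EXISTS $head;\nCREATE VIEW $head AS\n$selectSql"

      println(wrap("dd_new_people_mentions", isDeclaredTable = true,
                   "SELECT ... FROM people_mentions R0"))
    }
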
input_query: """ SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 @@ -241,3 +412,7 @@ weight: "?(has_spouse_features.R2.feature)" } +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_4, extraction_rule_1, extraction_rule_12, extraction_rule_10, extraction_rule_20, extraction_rule_17, extraction_rule_2, extraction_rule_0, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_16, extraction_rule_5, extraction_rule_8, extraction_rule_21, extraction_rule_22] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_dd_delta_has_spouse, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15] +deepdive.pipeline.pipelines.inference: [factor_dd_delta_has_spouse_32] diff --git a/test/expected-output-test/spouse_example/compile.expected b/test/expected-output-test/spouse_example/compile.expected index 2dfb73e7c..b7c45e917 100644 --- a/test/expected-output-test/spouse_example/compile.expected +++ b/test/expected-output-test/spouse_example/compile.expected @@ -15,7 +15,74 @@ } - deepdive.extraction.extractors.extraction_rule_8 { + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS sentences CASCADE; + CREATE TABLE + sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS has_spouse_candidates CASCADE; + CREATE TABLE + has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; + CREATE TABLE + has_spouse(relation_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS articles CASCADE; + CREATE TABLE + articles(article_id text, + text text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE; + CREATE TABLE + has_spouse_features(relation_id text, + feature text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS people_mentions CASCADE; + CREATE TABLE + people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_7 { sql: """ DROP VIEW IF EXISTS ext_people_input; CREATE VIEW ext_people_input AS SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" @@ -27,7 +94,7 @@ } - deepdive.extraction.extractors.extraction_rule_16 { + deepdive.extraction.extractors.extraction_rule_13 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; CREATE VIEW ext_has_spouse_features_input AS SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS 
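The dd_count column in the factor input_query above is the product of the body atoms' counts, one alias per atom. This mirrors the zipWithIndex and mkString construction that PATCH 115 below adds to the compiler:

    object DdCountSketch extends App {
      // One alias R<i> per body atom; the combined count is their product.
      def ddCountExpr(bodyAtoms: Seq[String]): String =
        bodyAtoms.zipWithIndex.map { case (_, i) => s"R$i.dd_count" }.mkString(" * ")

      println(ddCountExpr(Seq("dd_delta_has_spouse",
                              "dd_delta_has_spouse_candidates",
                              "has_spouse_features")))
      // prints: R0.dd_count * R1.dd_count * R2.dd_count
    }
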
"has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" @@ -35,11 +102,11 @@ WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_10" , "extraction_rule_6" ] + dependencies: [ "extraction_rule_9" , "extraction_rule_6" ] } - deepdive.extraction.extractors.extraction_rule_12 { + deepdive.extraction.extractors.extraction_rule_10 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_input; CREATE VIEW ext_has_spouse_input AS SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" @@ -57,44 +124,44 @@ output_relation: "people_mentions" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_8" ] + dependencies: [ "extraction_rule_7" ] } - deepdive.extraction.extractors.extraction_rule_14 { + deepdive.extraction.extractors.extraction_rule_12 { input: """ SELECT * FROM ext_has_spouse_features_input """ output_relation: "has_spouse_features" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_16" ] + dependencies: [ "extraction_rule_13" ] } - deepdive.extraction.extractors.extraction_rule_10 { + deepdive.extraction.extractors.extraction_rule_9 { input: """ SELECT * FROM ext_has_spouse_input """ output_relation: "has_spouse_candidates" udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_12" ] + dependencies: [ "extraction_rule_10" ] } deepdive.extraction.extractors.extraction_rule_has_spouse { - sql: """ DROP TABLE IF EXISTS has_spouse; - CREATE TABLE has_spouse AS + sql: """ TRUNCATE has_spouse; + INSERT INTO has_spouse SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label FROM has_spouse_candidates R0, has_spouse_features R1 WHERE R1.relation_id = R0.relation_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_10" , "extraction_rule_14" ] + dependencies: [ "extraction_rule_9" , "extraction_rule_12" ] } - deepdive.inference.factors.factor_has_spouse_0 { + deepdive.inference.factors.factor_has_spouse_15 { input_query: """ SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 @@ -103,3 +170,7 @@ weight: "?(has_spouse_features.R2.feature)" } +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_3, extraction_rule_5, extraction_rule_0, extraction_rule_4, extraction_rule_2] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_has_spouse, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_6] +deepdive.pipeline.pipelines.inference: [factor_has_spouse_15] diff --git a/test/expected-output-test/spouse_example/print-incremental.expected b/test/expected-output-test/spouse_example/print-incremental.expected index 0dececb3e..b9ed27b40 100644 --- 
a/test/expected-output-test/spouse_example/print-incremental.expected +++ b/test/expected-output-test/spouse_example/print-incremental.expected @@ -128,25 +128,6 @@ dd_new_has_spouse?(relation_id text). dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). -ext_people_input(sentence_id text, - words text[], - ner_tags text[], - dd_count int). - -dd_delta_ext_people_input(sentence_id text, - words text[], - ner_tags text[], - dd_count int). - -dd_new_ext_people_input(sentence_id text, - words text[], - ner_tags text[], - dd_count int). - -dd_new_ext_people_input(sentence_id, words, ner_tags, dd_count) :- - ext_people_input(sentence_id, words, ner_tags, dd_count); - dd_delta_ext_people_input(sentence_id, words, ner_tags, dd_count). - dd_delta_ext_people_input(s, words, ner_tags) :- dd_delta_sentences(a, b, words, c, d, e, ner_tags, f, s). @@ -158,31 +139,6 @@ function ext_people dd_delta_has_spouse_candidates :- !ext_has_spouse(dd_delta_ext_has_spouse_input). -ext_has_spouse_input(sentence_id text, - p1_id text, - p1_text text, - p2_id text, - p2_text text, - dd_count int). - -dd_delta_ext_has_spouse_input(sentence_id text, - p1_id text, - p1_text text, - p2_id text, - p2_text text, - dd_count int). - -dd_new_ext_has_spouse_input(sentence_id text, - p1_id text, - p1_text text, - p2_id text, - p2_text text, - dd_count int). - -dd_new_ext_has_spouse_input(sentence_id, p1_id, p1_text, p2_id, p2_text, dd_count) :- - ext_has_spouse_input(sentence_id, p1_id, p1_text, p2_id, p2_text, dd_count); - dd_delta_ext_has_spouse_input(sentence_id, p1_id, p1_text, p2_id, p2_text, dd_count). - dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- dd_delta_people_mentions(s, a, b, p1_text, p1_id), people_mentions(s, c, d, p2_text, p2_id); @@ -197,34 +153,6 @@ function ext_has_spouse dd_delta_has_spouse_features :- !ext_has_spouse_features(dd_delta_ext_has_spouse_features_input). -ext_has_spouse_features_input(words text[], - relation_id text, - p1_start_position int, - p1_length int, - p2_start_position int, - p2_length int, - dd_count int). - -dd_delta_ext_has_spouse_features_input(words text[], - relation_id text, - p1_start_position int, - p1_length int, - p2_start_position int, - p2_length int, - dd_count int). - -dd_new_ext_has_spouse_features_input(words text[], - relation_id text, - p1_start_position int, - p1_length int, - p2_start_position int, - p2_length int, - dd_count int). - -dd_new_ext_has_spouse_features_input(words, relation_id, p1_start_position, p1_length, p2_start_position, p2_length, dd_count) :- - ext_has_spouse_features_input(words, relation_id, p1_start_position, p1_length, p2_start_position, p2_length, dd_count); - dd_delta_ext_has_spouse_features_input(words, relation_id, p1_start_position, p1_length, p2_start_position, p2_length, dd_count). - dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), has_spouse_candidates(person1_id, person2_id, s, h, rid, x), diff --git a/test/expected-output-test/spouse_example/print.expected b/test/expected-output-test/spouse_example/print.expected index eefd87be5..1e8829521 100644 --- a/test/expected-output-test/spouse_example/print.expected +++ b/test/expected-output-test/spouse_example/print.expected @@ -31,10 +31,6 @@ has_spouse?(relation_id text). people_mentions :- !ext_people(ext_people_input). -ext_people_input(sentence_id text, - words text[], - ner_tags text[]). 
- ext_people_input(s, words, ner_tags) :- sentences(a, b, words, c, d, e, ner_tags, f, s). @@ -46,12 +42,6 @@ function ext_people has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input). -ext_has_spouse_input(sentence_id text, - p1_id text, - p1_text text, - p2_id text, - p2_text text). - ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- people_mentions(s, a, b, p1_text, p1_id), people_mentions(s, c, d, p2_text, p2_id). @@ -64,13 +54,6 @@ function ext_has_spouse has_spouse_features :- !ext_has_spouse_features(ext_has_spouse_features_input). -ext_has_spouse_features_input(words text[], - relation_id text, - p1_start_position int, - p1_length int, - p2_start_position int, - p2_length int). - ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), has_spouse_candidates(person1_id, person2_id, s, h, rid, x), From a019c1a8be7e8f417c38c3ec0bd56e821d0201ff Mon Sep 17 00:00:00 2001 From: senwu Date: Wed, 27 May 2015 00:59:33 -0700 Subject: [PATCH 115/347] inline dd_count in incremental ddlog, handle dd_count in compile --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 11 ++- .../ddlog/DeepDiveLogDeltaDeriver.scala | 22 +----- .../many_joins/print-incremental.expected | 45 +++++------ .../spouse_example/print-incremental.expected | 75 ++++++++----------- 4 files changed, 57 insertions(+), 96 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 4d63f391e..17d3abc3d 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -300,9 +300,10 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { def compileSchemaDeclarations(stmts: List[SchemaDeclaration], ss: CompilationState): CompiledBlocks = { var schemas = new ListBuffer[String]() for (stmt <- stmts) { - val columnDecls = stmt.a.terms map { + var columnDecls = stmt.a.terms map { case Variable(name, _, i) => s"${name} ${stmt.a.types(i)}" } + if (ss.isIncremental && !stmt.isQuery) columnDecls = columnDecls :+ "dd_count int" val indentation = " " * stmt.a.name.length val blockName = ss.resolveExtractorBlockName(stmt) schemas += s""" @@ -327,7 +328,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val qs = new QuerySchema( tmpCq ) // variable columns // dd_new_ tale only need original column name to make sure the schema is the same with original table - val tmpCqIsForNewTable = tmpCq.head.name.startsWith("dd_new_") + var tmpCqIsForNewTable = tmpCq.head.name.startsWith("dd_new_") val resolveColumnFlag = tmpCqIsForNewTable match { case true => OriginalOnly case false => OriginalAndAlias @@ -339,8 +340,10 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val selectStr = variableCols.mkString(", ") // additional dd_count column will be added in incremental version not dd_new_ table // dd_new_ table does not need additional dd_count column - val ddCount = if (ss.isIncremental && !tmpCqIsForNewTable) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" + val ddCount = if (ss.isIncremental) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCountStr = if (ddCount.length > 0) { + if (!tmpCqIsForNewTable) s""", ${ddCount} AS \"dd_count\" """ else s", ${ddCount}" + } else "" inputQueries += s""" SELECT 
${selectStr}${ddCountStr} diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index b4ac0c56c..3ef2bca6a 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -62,13 +62,7 @@ object DeepDiveLogDeltaDeriver{ def transform(stmt: SchemaDeclaration): List[Statement] = { var incrementalStatement = new ListBuffer[Statement]() // Incremental table - val incStmt = if (stmt.isQuery) stmt else stmt.copy( - a = stmt.a.copy( - terms = stmt.a.terms :+ Variable("dd_count", stmt.a.name, stmt.a.terms.length), - types = stmt.a.types :+ "int" - ) - ) - incrementalStatement += incStmt + incrementalStatement += stmt // Delta table var incDeltaStmt = stmt.copy( @@ -78,12 +72,6 @@ object DeepDiveLogDeltaDeriver{ types = stmt.a.types ) ) - if (!stmt.isQuery) incDeltaStmt = incDeltaStmt.copy( - a = incDeltaStmt.a.copy( - terms = incDeltaStmt.a.terms :+ Variable("dd_count", deltaPrefix + stmt.a.name, stmt.a.terms.length), - types = incDeltaStmt.a.types :+ "int" - ) - ) incrementalStatement += incDeltaStmt // New table @@ -94,17 +82,11 @@ object DeepDiveLogDeltaDeriver{ types = stmt.a.types ) ) - if (!stmt.isQuery) incNewStmt = incNewStmt.copy( - a = incNewStmt.a.copy( - terms = incNewStmt.a.terms :+ Variable("dd_count", newPrefix + stmt.a.name, stmt.a.terms.length), - types = incNewStmt.a.types :+ "int" - ) - ) incrementalStatement += incNewStmt if (!stmt.isQuery) { incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(incNewStmt.a.name, incNewStmt.a.terms), - List(List(Atom(incStmt.a.name, incStmt.a.terms)), List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms))))) + List(List(Atom(stmt.a.name, stmt.a.terms)), List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms))))) } incrementalStatement.toList } diff --git a/test/expected-output-test/many_joins/print-incremental.expected b/test/expected-output-test/many_joins/print-incremental.expected index 745d8e067..1394144d5 100644 --- a/test/expected-output-test/many_joins/print-incremental.expected +++ b/test/expected-output-test/many_joins/print-incremental.expected @@ -4,50 +4,41 @@ dd_delta_R?(x text). dd_new_R?(x text). -S(x text, - dd_count int). +S(x text). -dd_delta_S(x text, - dd_count int). +dd_delta_S(x text). -dd_new_S(x text, - dd_count int). +dd_new_S(x text). -dd_new_S(x, dd_count) :- - S(x, dd_count); - dd_delta_S(x, dd_count). +dd_new_S(x) :- + S(x); + dd_delta_S(x). T(x text, - f text, - dd_count int). + f text). dd_delta_T(x text, - f text, - dd_count int). + f text). dd_new_T(x text, - f text, - dd_count int). + f text). -dd_new_T(x, f, dd_count) :- - T(x, f, dd_count); - dd_delta_T(x, f, dd_count). +dd_new_T(x, f) :- + T(x, f); + dd_delta_T(x, f). U(x text, - l text, - dd_count int). + l text). dd_delta_U(x text, - l text, - dd_count int). + l text). dd_new_U(x text, - l text, - dd_count int). + l text). -dd_new_U(x, l, dd_count) :- - U(x, l, dd_count); - dd_delta_U(x, l, dd_count). +dd_new_U(x, l) :- + U(x, l); + dd_delta_U(x, l). dd_delta_R(x) :- dd_delta_S(x), diff --git a/test/expected-output-test/spouse_example/print-incremental.expected b/test/expected-output-test/spouse_example/print-incremental.expected index b9ed27b40..a92177eaa 100644 --- a/test/expected-output-test/spouse_example/print-incremental.expected +++ b/test/expected-output-test/spouse_example/print-incremental.expected @@ -1,18 +1,15 @@ articles(article_id text, - text text, - dd_count int). 
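The transform(stmt: SchemaDeclaration) hunk above now derives exactly three declarations plus one union rule per base relation, with no injected dd_count columns. A string-level sketch of that fan-out (the real code copies AST nodes rather than formatting text, and the declarations carry column types omitted here):

    object DeltaDeriverSketch extends App {
      val deltaPrefix = "dd_delta_"
      val newPrefix   = "dd_new_"

      // For relation R(args): declare R, dd_delta_R, dd_new_R, and derive
      // dd_new_R as the union of R and dd_delta_R.
      def derive(rel: String, args: Seq[String]): Seq[String] = {
        val a = args.mkString(", ")
        Seq(s"$rel($a).",
            s"$deltaPrefix$rel($a).",
            s"$newPrefix$rel($a).",
            s"$newPrefix$rel($a) :- $rel($a); $deltaPrefix$rel($a).")
      }

      derive("S", Seq("x")).foreach(println)
    }
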
+ text text). dd_delta_articles(article_id text, - text text, - dd_count int). + text text). dd_new_articles(article_id text, - text text, - dd_count int). + text text). -dd_new_articles(article_id, text, dd_count) :- - articles(article_id, text, dd_count); - dd_delta_articles(article_id, text, dd_count). +dd_new_articles(article_id, text) :- + articles(article_id, text); + dd_delta_articles(article_id, text). sentences(document_id text, sentence text, @@ -22,8 +19,7 @@ sentences(document_id text, dependencies text[], ner_tags text[], sentence_offset int, - sentence_id text, - dd_count int). + sentence_id text). dd_delta_sentences(document_id text, sentence text, @@ -33,8 +29,7 @@ dd_delta_sentences(document_id text, dependencies text[], ner_tags text[], sentence_offset int, - sentence_id text, - dd_count int). + sentence_id text). dd_new_sentences(document_id text, sentence text, @@ -44,81 +39,71 @@ dd_new_sentences(document_id text, dependencies text[], ner_tags text[], sentence_offset int, - sentence_id text, - dd_count int). + sentence_id text). -dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id, dd_count) :- - sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id, dd_count); - dd_delta_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id, dd_count). +dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) :- + sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id); + dd_delta_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id). people_mentions(sentence_id text, start_position int, length int, text text, - mention_id text, - dd_count int). + mention_id text). dd_delta_people_mentions(sentence_id text, start_position int, length int, text text, - mention_id text, - dd_count int). + mention_id text). dd_new_people_mentions(sentence_id text, start_position int, length int, text text, - mention_id text, - dd_count int). + mention_id text). -dd_new_people_mentions(sentence_id, start_position, length, text, mention_id, dd_count) :- - people_mentions(sentence_id, start_position, length, text, mention_id, dd_count); - dd_delta_people_mentions(sentence_id, start_position, length, text, mention_id, dd_count). +dd_new_people_mentions(sentence_id, start_position, length, text, mention_id) :- + people_mentions(sentence_id, start_position, length, text, mention_id); + dd_delta_people_mentions(sentence_id, start_position, length, text, mention_id). has_spouse_candidates(person1_id text, person2_id text, sentence_id text, description text, relation_id text, - is_true boolean, - dd_count int). + is_true boolean). dd_delta_has_spouse_candidates(person1_id text, person2_id text, sentence_id text, description text, relation_id text, - is_true boolean, - dd_count int). + is_true boolean). dd_new_has_spouse_candidates(person1_id text, person2_id text, sentence_id text, description text, relation_id text, - is_true boolean, - dd_count int). + is_true boolean). 
-dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true, dd_count) :- - has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true, dd_count); - dd_delta_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true, dd_count). +dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true) :- + has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true); + dd_delta_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true). has_spouse_features(relation_id text, - feature text, - dd_count int). + feature text). dd_delta_has_spouse_features(relation_id text, - feature text, - dd_count int). + feature text). dd_new_has_spouse_features(relation_id text, - feature text, - dd_count int). + feature text). -dd_new_has_spouse_features(relation_id, feature, dd_count) :- - has_spouse_features(relation_id, feature, dd_count); - dd_delta_has_spouse_features(relation_id, feature, dd_count). +dd_new_has_spouse_features(relation_id, feature) :- + has_spouse_features(relation_id, feature); + dd_delta_has_spouse_features(relation_id, feature). has_spouse?(relation_id text). From e53d90bc70fe23e0726bbfe4a7f4aa785ac791a9 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Wed, 27 May 2015 05:18:13 -0700 Subject: [PATCH 116/347] Fix Travis config for coverage submission --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index f955b0537..e2b825e41 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,4 +6,4 @@ before_install: script: - make test MEASURE_COVERAGE=true after_success: - - sbt coveralls + - sbt coverageReport coveralls From 9e1eb8a4278bda3e8f974889aa8689011319c40c Mon Sep 17 00:00:00 2001 From: senwu Date: Wed, 27 May 2015 21:55:53 -0700 Subject: [PATCH 117/347] Import supervision into extraction rules for tying labels; let the extraction-rule compiler handle scoping rules, and remove node-query generation from the inference-rule compiler. --- examples/spouse_example.ddl | 2 + .../deepdive/ddlog/DeepDiveLogCompiler.scala | 103 +++++----------- .../ddlog/DeepDiveLogDeltaDeriver.scala | 2 +- .../deepdive/ddlog/DeepDiveLogParser.scala | 13 ++- .../ddlog/DeepDiveLogPrettyPrinter.scala | 8 +- .../compile-incremental.expected | 102 +++++++++-------- .../spouse_example/compile.expected | 43 ++++---- .../spouse_example/print-incremental.expected | 5 + .../spouse_example/print.expected | 5 + 9 files changed, 134 insertions(+), 149 deletions(-) diff --git a/examples/spouse_example.ddl b/examples/spouse_example.ddl index 1e3e0304b..6d186b067 100644 --- a/examples/spouse_example.ddl +++ b/examples/spouse_example.ddl @@ -68,6 +68,8 @@ function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. +has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l.
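The added ddl line above is the scoping rule this patch introduces: an extraction rule over the variable relation whose label = l clause names the supervision column. With the parser change later in this patch it lands in the AST roughly as below; the case classes are trimmed stand-ins (the real Atom holds Variable terms, not strings):

    object ScopingRuleSketch extends App {
      case class Atom(name: String, terms: List[String])
      case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]])
      case class ExtractionRule(q: ConjunctiveQuery, supervision: String = null)

      val scopingRule = ExtractionRule(
        ConjunctiveQuery(
          Atom("has_spouse", List("rid")),
          List(List(Atom("has_spouse_candidates",
                         List("a", "b", "c", "d", "rid", "l"))))),
        supervision = "l") // carried over from the `label = l` clause

      println(scopingRule)
    }
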
+ has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 17d3abc3d..389c19fa3 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -112,7 +112,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C schema += { (r,i) -> n } ground_relations += { r -> !isQuery } // record whether a query or a ground term. } - case ExtractionRule(_) => () + case ExtractionRule(_,_) => () case InferenceRule(_,_,_,_) => () case fdecl : FunctionDeclaration => function_schema += {fdecl.functionName -> fdecl} case FunctionCallRule(_,_,_) => () @@ -234,7 +234,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C def analyzeVisible(statements: List[Statement]) = { extractionRuleGroupByHead foreach {keyVal => visible += keyVal._2(0)} functionCallRuleGroupByInput foreach {keyVal => visible += keyVal._2(0)} - inferenceRuleGroupByHead foreach {keyVal => visible += keyVal._2(0)} + // inferenceRuleGroupByHead foreach {keyVal => visible += keyVal._2(0)} } // Analyze the dependency between statements and construct a graph. @@ -326,28 +326,45 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody)) // Generate the body of the query. val qs = new QuerySchema( tmpCq ) - // variable columns - // dd_new_ tale only need original column name to make sure the schema is the same with original table - var tmpCqIsForNewTable = tmpCq.head.name.startsWith("dd_new_") - val resolveColumnFlag = tmpCqIsForNewTable match { - case true => OriginalOnly - case false => OriginalAndAlias - } - val variableCols = tmpCq.head.terms flatMap { - case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, tmpCq, resolveColumnFlag) - } + if (ss.inferenceRuleGroupByHead contains stmt.q.head.name) { + if (stmt.supervision == null) ss.error(s"Cannot find supervision for variable ${stmt.q.head.name}.\n") + if (stmt.q.bodies.length > 1) ss.error(s"Scoping rule does not allow disjunction.\n") + val headTerms = tmpCq.head.terms map { + case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" + } + val index = qs.getBodyIndex(stmt.supervision) + val name = ss.resolveName(qs.getVar(stmt.supervision)) + val labelCol = s"R${index}.${name}" + val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") + val ddCount = if (ss.isIncremental) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else "" + inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr} + ${ ss.generateSQLBody(tmpCq) } + """ + } else { + // variable columns + // dd_new_ tale only need original column name to make sure the schema is the same with original table + var tmpCqIsForNewTable = tmpCq.head.name.startsWith("dd_new_") + val resolveColumnFlag = tmpCqIsForNewTable match { + case true => OriginalOnly + case false => OriginalAndAlias + } + val variableCols = tmpCq.head.terms flatMap { + case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, tmpCq, resolveColumnFlag) + } - val selectStr = variableCols.mkString(", ") - // additional dd_count column will be added in incremental version not dd_new_ table - // dd_new_ table does not need additional dd_count column - val ddCount = if 
(ss.isIncremental) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - val ddCountStr = if (ddCount.length > 0) { - if (!tmpCqIsForNewTable) s""", ${ddCount} AS \"dd_count\" """ else s", ${ddCount}" - } else "" + val selectStr = variableCols.mkString(", ") + // additional dd_count column will be added in incremental version not dd_new_ table + // dd_new_ table does not need additional dd_count column + val ddCount = if (ss.isIncremental) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCountStr = if (ddCount.length > 0) { + if (!tmpCqIsForNewTable) s""", ${ddCount} AS \"dd_count\" """ else s", ${ddCount}" + } else "" - inputQueries += s""" - SELECT ${selectStr}${ddCountStr} - ${ ss.generateSQLBody(tmpCq) }""" + inputQueries += s""" + SELECT ${selectStr}${ddCountStr} + ${ ss.generateSQLBody(tmpCq) }""" + } } } val blockName = ss.resolveExtractorBlockName(stmts(0)) @@ -402,48 +419,6 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { // generate inference rule part for deepdive def compileInferenceRules(stmts: List[InferenceRule], ss: CompilationState): CompiledBlocks = { var blocks = List[String]() - // node query - // generate the node portion (V) of the factor graph - def compileNodeRule(zs: List[InferenceRule], ss: CompilationState) : CompiledBlocks = { - var inputQueries = new ListBuffer[String]() - for (z <- zs) { - for (cqBody <- z.q.bodies) { - val tmpCq = ConjunctiveQuery(z.q.head, List(cqBody)) - val qs = new QuerySchema(tmpCq) - val headTerms = tmpCq.head.terms map { - case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" - } - val index = qs.getBodyIndex(z.supervision) - val name = ss.resolveName(qs.getVar(z.supervision)) - val labelCol = s"R${index}.${name}" - val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") - val ddCount = if (ss.isIncremental) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else "" - - inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr} - ${ ss.generateSQLBody(tmpCq) } - """ - } - } - val blockName = ss.resolveExtractorBlockName(zs(0)) - val sqlCmdForCleanUp = if (ss.schemaDeclarationGroupByHead contains zs(0).q.head.name) "TRUNCATE" else "DROP VIEW IF EXISTS" - val sqlCmdForInsert = if (ss.schemaDeclarationGroupByHead contains zs(0).q.head.name) "INSERT INTO" else "CREATE VIEW" - val useAS = if (ss.schemaDeclarationGroupByHead contains zs(0).q.head.name) "" else " AS" - val ext = s""" - deepdive.extraction.extractors.${blockName} { - sql: \"\"\" ${sqlCmdForCleanUp} ${zs(0).q.head.name}; - ${sqlCmdForInsert} ${zs(0).q.head.name}${useAS} - ${inputQueries.mkString(" UNION ")} - \"\"\" - style: "sql_extractor" - ${ss.generateDependenciesOfCompiledBlockFor(zs)} - } - """ - List(ext) - } - if (ss.isQueryTerm(stmts(0).q.head.name)) - blocks :::= compileNodeRule(stmts, ss) - for (stmt <- stmts) { var inputQueries = new ListBuffer[String]() var func = "" diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index 3ef2bca6a..b6af1da2d 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -113,7 +113,7 @@ object DeepDiveLogDeltaDeriver{ // Incremental extraction rule, // create delta rules based on original 
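The supervision branch added above is what emits the label queries visible in the updated expected output further down (extraction_rule_32 and extraction_rule_15). Its core, sketched with alias resolution faked by fixed strings instead of QuerySchema lookups:

    object LabelQuerySketch extends App {
      // SELECT DISTINCT 0 as id, <head columns>, <supervision column> AS label
      def labelQuery(headCols: Seq[String], labelCol: String, body: String): String = {
        val sel = ("0 as id" +: headCols).mkString(", ")
        s"SELECT DISTINCT $sel, $labelCol AS label\n$body"
      }

      println(labelQuery(Seq("R0.relation_id"), "R0.is_true",
                         "FROM dd_delta_has_spouse_candidates R0"))
    }
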
extraction rule def transform(stmt: ExtractionRule): List[Statement] = { - List(ExtractionRule(transform(stmt.q))) + List(ExtractionRule(transform(stmt.q), stmt.supervision)) } // Incremental function call rule, diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index 221b1cfd5..4cc5284c4 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -36,9 +36,9 @@ case class RowWiseLineHandler(format: String, command: String) extends FunctionI trait Statement case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statement // atom and whether this is a query relation. case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration]) extends Statement -case class ExtractionRule(q : ConjunctiveQuery) extends Statement // Extraction rule +case class ExtractionRule(q : ConjunctiveQuery, supervision: String = null) extends Statement // Extraction rule case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule -case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String, semantics : String = "imply") extends Statement // Weighted rule +case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String, semantics : String = "Imply") extends Statement // Weighted rule // Parser @@ -134,8 +134,9 @@ class DeepDiveLogParser extends JavaTokenParsers { } def extractionRule : Parser[ExtractionRule] = - conjunctiveQuery ^^ { - ExtractionRule(_) + conjunctiveQuery ~ opt(supervision) ^^ { + case (q ~ supervision) => + ExtractionRule(q, supervision.getOrElse(null)) } def functionCallRule : Parser[FunctionCallRule] = @@ -155,10 +156,10 @@ class DeepDiveLogParser extends JavaTokenParsers { def semantics = "semantics" ~> "=" ~> semanticType def inferenceRule : Parser[InferenceRule] = - ( conjunctiveQuery ~ factorWeight ~ supervision ~ semantics + ( conjunctiveQuery ~ factorWeight ~ supervision ~ opt(semantics) ) ^^ { case (q ~ weight ~ supervision ~ semantics) => - InferenceRule(q, weight, supervision, semantics) + InferenceRule(q, weight, supervision, semantics.getOrElse("Imply")) } // rules or schema elements in arbitrary order diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index f0b2a944b..2a00b38f2 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -59,8 +59,10 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { } def print(stmt: ExtractionRule): String = { - s"""${print(stmt.q)}. - |""".stripMargin + print(stmt.q) + + ( if (stmt.supervision == null) "" + else "\n label = " + stmt.supervision + ) + ".\n" } def print(stmt: FunctionCallRule): String = { @@ -81,7 +83,7 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { ) + ( if (stmt.semantics == null) "" else "\n semantics = " + stmt.semantics - ) + "." 
+ ) + ".\n" } override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected index 0ace5843d..78d7547de 100644 --- a/test/expected-output-test/spouse_example/compile-incremental.expected +++ b/test/expected-output-test/spouse_example/compile-incremental.expected @@ -234,17 +234,17 @@ deepdive.extraction.extractors.extraction_rule_30 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS 
"dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" - FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" @@ -255,11 +255,11 @@ deepdive.extraction.extractors.extraction_rule_11 { sql: """ TRUNCATE dd_new_people_mentions; INSERT INTO dd_new_people_mentions - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count - FROM people_mentions R0 + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + FROM people_mentions R0 UNION - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count - FROM dd_delta_people_mentions R0 + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + FROM dd_delta_people_mentions R0 """ style: "sql_extractor" @@ -270,11 +270,11 @@ deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE dd_new_articles; INSERT INTO dd_new_articles - SELECT R0.article_id, R0.text, R0.dd_count - FROM articles R0 + SELECT R0.article_id, R0.text, R0.dd_count + FROM articles R0 UNION - SELECT R0.article_id, R0.text, R0.dd_count - FROM dd_delta_articles R0 + SELECT R0.article_id, R0.text, R0.dd_count + FROM dd_delta_articles R0 """ style: "sql_extractor" @@ -285,8 +285,8 @@ deepdive.extraction.extractors.extraction_rule_24 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS - SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" - FROM dd_delta_sentences R0 + SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS 
"dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" + FROM dd_delta_sentences R0 """ style: "sql_extractor" @@ -297,11 +297,11 @@ deepdive.extraction.extractors.extraction_rule_15 { sql: """ TRUNCATE dd_new_has_spouse_candidates; INSERT INTO dd_new_has_spouse_candidates - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count - FROM has_spouse_candidates R0 + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + FROM has_spouse_candidates R0 UNION - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count - FROM dd_delta_has_spouse_candidates R0 + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + FROM dd_delta_has_spouse_candidates R0 """ style: "sql_extractor" @@ -312,11 +312,11 @@ deepdive.extraction.extractors.extraction_rule_19 { sql: """ TRUNCATE dd_new_has_spouse_features; INSERT INTO dd_new_has_spouse_features - SELECT R0.relation_id, R0.feature, R0.dd_count - FROM has_spouse_features R0 + SELECT R0.relation_id, R0.feature, R0.dd_count + FROM has_spouse_features R0 UNION - SELECT R0.relation_id, R0.feature, R0.dd_count - FROM dd_delta_has_spouse_features R0 + SELECT R0.relation_id, R0.feature, R0.dd_count + FROM dd_delta_has_spouse_features R0 """ style: "sql_extractor" @@ -327,11 +327,11 @@ deepdive.extraction.extractors.extraction_rule_7 { sql: """ TRUNCATE dd_new_sentences; INSERT INTO dd_new_sentences - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count - FROM sentences R0 + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + FROM sentences R0 UNION - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count - FROM dd_delta_sentences R0 + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + FROM dd_delta_sentences R0 """ style: "sql_extractor" @@ -342,11 +342,11 @@ deepdive.extraction.extractors.extraction_rule_27 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS - SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" - FROM dd_delta_people_mentions R0, people_mentions R1 + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM dd_delta_people_mentions R0, people_mentions R1 WHERE R1.sentence_id = R0.sentence_id UNION - SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS 
"dd_count" - FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 + SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" @@ -354,6 +354,18 @@ } + deepdive.extraction.extractors.extraction_rule_32 { + sql: """ TRUNCATE dd_delta_has_spouse; + INSERT INTO dd_delta_has_spouse SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count AS dd_count + FROM dd_delta_has_spouse_candidates R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_26" ] + } + + deepdive.extraction.extractors.extraction_rule_23 { input: """ SELECT * FROM dd_delta_ext_people_input """ @@ -384,23 +396,7 @@ } - deepdive.extraction.extractors.extraction_rule_dd_delta_has_spouse { - sql: """ TRUNCATE dd_delta_has_spouse; - INSERT INTO dd_delta_has_spouse - SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count - FROM dd_delta_has_spouse_candidates R0, has_spouse_features R1 - WHERE R1.relation_id = R0.relation_id - UNION SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count * R1.dd_count AS dd_count - FROM dd_new_has_spouse_candidates R0, dd_delta_has_spouse_features R1 - WHERE R1.relation_id = R0.relation_id - - """ - style: "sql_extractor" - dependencies: [ "extraction_rule_26" , "extraction_rule_15" , "extraction_rule_29" ] - } - - - deepdive.inference.factors.factor_dd_delta_has_spouse_32 { + deepdive.inference.factors.factor_dd_delta_has_spouse_33 { input_query: """ SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 @@ -414,5 +410,5 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_4, extraction_rule_1, extraction_rule_12, extraction_rule_10, extraction_rule_20, extraction_rule_17, extraction_rule_2, extraction_rule_0, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_16, extraction_rule_5, extraction_rule_8, extraction_rule_21, extraction_rule_22] -deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_dd_delta_has_spouse, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15] -deepdive.pipeline.pipelines.inference: [factor_dd_delta_has_spouse_32] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_32, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15] +deepdive.pipeline.pipelines.inference: [factor_dd_delta_has_spouse_33] diff --git a/test/expected-output-test/spouse_example/compile.expected b/test/expected-output-test/spouse_example/compile.expected index b7c45e917..1eee5f63e 100644 --- a/test/expected-output-test/spouse_example/compile.expected +++ b/test/expected-output-test/spouse_example/compile.expected @@ 
-82,11 +82,23 @@ style: "sql_extractor" } + deepdive.extraction.extractors.extraction_rule_15 { + sql: """ TRUNCATE has_spouse; + INSERT INTO has_spouse SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label + FROM has_spouse_candidates R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_9" ] + } + + deepdive.extraction.extractors.extraction_rule_7 { sql: """ DROP VIEW IF EXISTS ext_people_input; CREATE VIEW ext_people_input AS - SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" - FROM sentences R0 + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" + FROM sentences R0 """ style: "sql_extractor" @@ -97,8 +109,8 @@ deepdive.extraction.extractors.extraction_rule_13 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; CREATE VIEW ext_has_spouse_features_input AS - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" - FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" @@ -109,8 +121,8 @@ deepdive.extraction.extractors.extraction_rule_10 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_input; CREATE VIEW ext_has_spouse_input AS - SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" - FROM people_mentions R0, people_mentions R1 + SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" + FROM people_mentions R0, people_mentions R1 WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" @@ -148,20 +160,7 @@ } - deepdive.extraction.extractors.extraction_rule_has_spouse { - sql: """ TRUNCATE has_spouse; - INSERT INTO has_spouse - SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label - FROM has_spouse_candidates R0, has_spouse_features R1 - WHERE R1.relation_id = R0.relation_id - - """ - style: "sql_extractor" - dependencies: [ "extraction_rule_9" , "extraction_rule_12" ] - } - - - deepdive.inference.factors.factor_has_spouse_15 { + deepdive.inference.factors.factor_has_spouse_16 { input_query: """ SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 @@ -172,5 +171,5 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_3, 
extraction_rule_5, extraction_rule_0, extraction_rule_4, extraction_rule_2] -deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_has_spouse, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_6] -deepdive.pipeline.pipelines.inference: [factor_has_spouse_15] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_15, extraction_rule_6] +deepdive.pipeline.pipelines.inference: [factor_has_spouse_16] diff --git a/test/expected-output-test/spouse_example/print-incremental.expected b/test/expected-output-test/spouse_example/print-incremental.expected index a92177eaa..de4ffdc89 100644 --- a/test/expected-output-test/spouse_example/print-incremental.expected +++ b/test/expected-output-test/spouse_example/print-incremental.expected @@ -162,6 +162,10 @@ function ext_has_spouse_features implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. +dd_delta_has_spouse(rid) :- + dd_delta_has_spouse_candidates(a, b, c, d, rid, l) + label = l. + dd_delta_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f); @@ -170,3 +174,4 @@ dd_delta_has_spouse(rid) :- weight = f label = l semantics = imply. + diff --git a/test/expected-output-test/spouse_example/print.expected b/test/expected-output-test/spouse_example/print.expected index 1e8829521..4f6323266 100644 --- a/test/expected-output-test/spouse_example/print.expected +++ b/test/expected-output-test/spouse_example/print.expected @@ -66,9 +66,14 @@ function ext_has_spouse_features implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. +has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l) + label = l. + has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f label = l semantics = imply. 
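The patch that follows generalizes ddlog's single --incremental flag into four compilation modes. A minimal sketch, assuming only the flag names visible in the scopt options below (the helper itself is hypothetical and not part of the patch), of how a command line maps to a mode:

    object ModeFromArgs {
      // "-i"/"--incremental", "--materialization" and "--merge" select the
      // non-default modes; anything else compiles in ORIGINAL mode.
      def apply(args: Seq[String]): String =
        if (args.exists(a => a == "-i" || a == "--incremental")) "INCREMENTAL"
        else if (args.contains("--materialization")) "MATERIALIZATION"
        else if (args.contains("--merge")) "MERGE"
        else "ORIGINAL"
    }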
+
From 35e657a985070197d27f04ec28c5e8b8bdd25a4e Mon Sep 17 00:00:00 2001
From: senwu
Date: Thu, 28 May 2015 01:15:17 -0700
Subject: [PATCH 118/347] support original, incremental, materialization,
 merge modes and add corresponding test examples

---
 .../org/deepdive/ddlog/DeepDiveLog.scala      | 19 +-
 .../deepdive/ddlog/DeepDiveLogCompiler.scala  | 83 +++++---
 .../ddlog/DeepDiveLogMergeDeriver.scala       | 36 ++++
 .../ddlog/DeepDiveLogPrettyPrinter.scala      | 12 +-
 test/expected-output-test.bats.template       | 22 +++
 .../compile-materialization.expected          | 180 ++++++++++++++++++
 .../spouse_example/compile-merge.expected     | 83 ++++++++
 7 files changed, 402 insertions(+), 33 deletions(-)
 create mode 100644 src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala
 create mode 100644 test/expected-output-test/spouse_example/compile-materialization.expected
 create mode 100644 test/expected-output-test/spouse_example/compile-merge.expected

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLog.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLog.scala
index df647b932..eecb7f9a7 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLog.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLog.scala
@@ -2,18 +2,33 @@ package org.deepdive.ddlog

 // A command-line interface
 object DeepDiveLog {
+
+  object Mode extends Enumeration {
+    type Mode = Value
+    // Four modes of DDlog compilation:
+    // ORIGINAL        => Generate standard application.conf
+    // INCREMENTAL     => Generate incremental application.conf
+    // MATERIALIZATION => Materialize existing factor graph for incremental mode
+    // MERGE           => Merge new generated data into original table
+    val ORIGINAL, INCREMENTAL, MATERIALIZATION, MERGE = Value
+
+  }
+  import Mode._
+
   type Program = List[Statement]

   case class Config
   ( handler: DeepDiveLogHandler = null
   , inputFiles: List[String] = List()
-  , isIncremental: Boolean = false
+  , mode: Mode = ORIGINAL
   )
   val parser = new scopt.OptionParser[Config]("ddlogc") {
     head("ddlogc", "0.0.1")
     cmd("compile") required() action { (_, c) => c.copy(handler = DeepDiveLogCompiler) }
     cmd("print") required() action { (_, c) => c.copy(handler = DeepDiveLogPrettyPrinter) }
-    opt[Unit]('i', "incremental") optional() action { (_, c) => c.copy(isIncremental = true) } text("Whether to derive delta rules")
+    opt[Unit]('i', "incremental") optional() action { (_, c) => c.copy(mode = INCREMENTAL) } text("Whether to derive delta rules")
+    opt[Unit]("materialization") optional() action { (_, c) => c.copy(mode = MATERIALIZATION) } text("Whether to materialize original data")
+    opt[Unit]("merge") optional() action { (_, c) => c.copy(mode = MERGE) } text("Whether to merge delta data")
     arg[String]("FILE...") unbounded() required() action { (f, c) => c.copy(inputFiles = c.inputFiles ++ List(f)) } text("Input DDLog programs files")
     checkConfig { c =>
       if (c.handler == null) failure("No command specified")
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index 389c19fa3..db85a8e66 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -61,6 +61,7 @@ Consider
 import scala.collection.immutable.HashMap
 import org.apache.commons.lang3.StringEscapeUtils
 import scala.collection.mutable.ListBuffer
+import org.deepdive.ddlog.DeepDiveLog.Mode._

 object AliasStyle extends Enumeration {
   type AliasStyle = Value
@@ -93,7 +94,9 @@ class CompilationState( statements : DeepDiveLog.Program, config :
DeepDiveLog.C // The statement whether will compile or union to other statements var visible : Set[Statement] = Set() - var isIncremental : Boolean = false + var mode : Mode = ORIGINAL + + var useDeltaCount : Boolean = false // Mapping head names to the actual statements var schemaDeclarationGroupByHead : Map[String, List[SchemaDeclaration]] = new HashMap[String, List[SchemaDeclaration]]() @@ -104,7 +107,11 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C def init() = { // generate the statements. - isIncremental = config.isIncremental + mode = config.mode + useDeltaCount = mode match { + case ORIGINAL => false + case _ => true + } statements.foreach { case SchemaDeclaration(Attribute(r, terms, types), isQuery) => terms.foreach { @@ -303,7 +310,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var columnDecls = stmt.a.terms map { case Variable(name, _, i) => s"${name} ${stmt.a.types(i)}" } - if (ss.isIncremental && !stmt.isQuery) columnDecls = columnDecls :+ "dd_count int" + if (ss.useDeltaCount && !stmt.isQuery) columnDecls = columnDecls :+ "dd_count int" val indentation = " " * stmt.a.name.length val blockName = ss.resolveExtractorBlockName(stmt) schemas += s""" @@ -336,7 +343,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val name = ss.resolveName(qs.getVar(stmt.supervision)) val labelCol = s"R${index}.${name}" val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ") - val ddCount = if (ss.isIncremental) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCount = if (ss.useDeltaCount) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else "" inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr} ${ ss.generateSQLBody(tmpCq) } @@ -344,8 +351,11 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } else { // variable columns // dd_new_ tale only need original column name to make sure the schema is the same with original table - var tmpCqIsForNewTable = tmpCq.head.name.startsWith("dd_new_") - val resolveColumnFlag = tmpCqIsForNewTable match { + var tmpCqUseOnlyOriginal = ss.mode match { + case MERGE => true + case _ => if (tmpCq.head.name.startsWith("dd_new_")) true else false + } + val resolveColumnFlag = tmpCqUseOnlyOriginal match { case true => OriginalOnly case false => OriginalAndAlias } @@ -354,27 +364,42 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } val selectStr = variableCols.mkString(", ") - // additional dd_count column will be added in incremental version not dd_new_ table - // dd_new_ table does not need additional dd_count column - val ddCount = if (ss.isIncremental) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + + var ddCount = if (ss.useDeltaCount) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + ddCount = ss.mode match { + case MERGE => s"SUM(${ddCount})" + case _ => ddCount + } val ddCountStr = if (ddCount.length > 0) { - if (!tmpCqIsForNewTable) s""", ${ddCount} AS \"dd_count\" """ else s", ${ddCount}" + if (!tmpCqUseOnlyOriginal) s""", ${ddCount} AS \"dd_count\" """ else s", ${ddCount}" } else "" - + val groupBy = ss.mode match { + case MERGE => s" GROUP BY ${selectStr}" + case _ => "" + } inputQueries += s""" SELECT ${selectStr}${ddCountStr} - ${ ss.generateSQLBody(tmpCq) }""" + ${ 
ss.generateSQLBody(tmpCq) }${ groupBy }""" } } } val blockName = ss.resolveExtractorBlockName(stmts(0)) - val sqlCmdForCleanUp = if (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name) "TRUNCATE" else "DROP VIEW IF EXISTS" - val sqlCmdForInsert = if (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name) "INSERT INTO" else "CREATE VIEW" - val useAS = if (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name) "" else " AS" + val createTable = ss.mode match { + case MERGE => true + case _ => if (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name) true else false + } + val sqlCmdForCleanUp = if (createTable) "TRUNCATE" else "DROP VIEW IF EXISTS" + val sqlCmdForInsert = if (createTable) "INSERT INTO" else "CREATE VIEW" + val useAS = if (createTable) "" else " AS" + val cleanUp = ss.mode match { + case MERGE => s"""; + DELETE FROM ${stmts(0).q.head.name} WHERE dd_count = 0;""" + case _ => "" + } val extractor = s""" deepdive.extraction.extractors.${blockName} { sql: \"\"\" ${sqlCmdForCleanUp} ${stmts(0).q.head.name}; - ${sqlCmdForInsert} ${stmts(0).q.head.name}${useAS} ${inputQueries.mkString(" UNION ")} + ${sqlCmdForInsert} ${stmts(0).q.head.name}${useAS} ${inputQueries.mkString(" UNION ")}${cleanUp} \"\"\" style: "sql_extractor" ${ss.generateDependenciesOfCompiledBlockFor(stmts)} @@ -441,7 +466,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val selectStr = (List(variableIdsStr, uwStr) flatten).mkString(", ") - val ddCount = if (ss.isIncremental) ( fakeCQ.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCount = if (ss.useDeltaCount) ( fakeCQ.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" // factor input query @@ -490,13 +515,13 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { def compilePipelines(ss: CompilationState): CompiledBlocks = { val run = "deepdive.pipeline.run: ${PIPELINE}" val setup_database_pipeline = ((ss.schemaDeclarationGroupByHead map (_._2)).flatten map {s => ss.resolveExtractorBlockName(s)}).mkString(", ") - val initdb = s"deepdive.pipeline.pipelines.initdb: [${setup_database_pipeline}]" + val initdb = if (setup_database_pipeline.length > 0) s"deepdive.pipeline.pipelines.initdb: [${setup_database_pipeline}]" else "" val extraction = (ss.visible map {s => ss.resolveExtractorBlockName(s)}).mkString(", ") - val extraction_pipeline = s"deepdive.pipeline.pipelines.extraction: [${extraction}]" + val extraction_pipeline = if (extraction.length > 0) s"deepdive.pipeline.pipelines.extraction: [${extraction}]" else "" val inference = ((ss.inferenceRuleGroupByHead map (_._2)).flatten map {s => ss.resolveInferenceBlockName(s)}).mkString(", ") - val inference_pipeline = s"deepdive.pipeline.pipelines.inference: [${inference}]" + val inference_pipeline = if (inference.length > 0) s"deepdive.pipeline.pipelines.inference: [${inference}]" else "" - List(run, initdb, extraction_pipeline, inference_pipeline) + List(run, initdb, extraction_pipeline, inference_pipeline).filter(_ != "") } // generate variable schema statements @@ -521,9 +546,13 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { // determine the program to compile val programToCompile = - // derive and compile the program with delta rules instead for incremental version - if (config.isIncremental) 
DeepDiveLogDeltaDeriver.derive(parsedProgram)
-      else parsedProgram
+      // derive and compile the program based on mode information
+      config.mode match {
+        case ORIGINAL => parsedProgram
+        case INCREMENTAL => DeepDiveLogDeltaDeriver.derive(parsedProgram)
+        case MATERIALIZATION => parsedProgram
+        case MERGE => DeepDiveLogMergeDeriver.derive(parsedProgram)
+      }

     // take an initial pass to analyze the parsed program
     val state = new CompilationState( programToCompile, config )
@@ -547,8 +576,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
     // emit the generated code
     blocks foreach println

-    if (config.isIncremental) {
-      // TODO emit extra extractor for moving rows of dd_delta_* to *
-    }
+    // if (config.isIncremental) {
+    //   // TODO emit extra extractor for moving rows of dd_delta_* to *
+    // }
   }
 }
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala
new file mode 100644
index 000000000..8be539d0a
--- /dev/null
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala
@@ -0,0 +1,36 @@
+package org.deepdive.ddlog
+
+import scala.collection.mutable.ListBuffer
+
+object DeepDiveLogMergeDeriver{
+
+  // Default prefix for incremental tables
+  val newPrefix = "dd_new_"
+
+  // For each original (non-query) relation, derive a rule that merges the
+  // contents of its dd_new_ table back into the original table
+  def transform(stmt: SchemaDeclaration): Statement = {
+    // New table
+    var incNewStmt = stmt.copy(
+      a = stmt.a.copy(
+        name = newPrefix + stmt.a.name,
+        terms = stmt.a.terms map {term => term.copy(relName = newPrefix + term.relName)},
+        types = stmt.a.types
+      )
+    )
+
+    ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, stmt.a.terms),
+      List(List(Atom(incNewStmt.a.name, incNewStmt.a.terms)))))
+  }
+
+  def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = {
+    var mergeProgram = new ListBuffer[Statement]()
+    program foreach { x =>
+      x match {
+        case x: SchemaDeclaration => if (!x.isQuery) mergeProgram += transform(x)
+        case _ =>
+      }
+    }
+    mergeProgram.toList
+  }
+}
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
index 2a00b38f2..d9054c65c 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
@@ -1,6 +1,7 @@
 package org.deepdive.ddlog

 import org.apache.commons.lang3.StringEscapeUtils
+import org.deepdive.ddlog.DeepDiveLog.Mode._

 // Pretty printer that simply prints the parsed input
 object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler {
@@ -88,10 +89,13 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler {
   override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = {
     val programToPrint =
-      // derive the delta rules for incremental version
-      if (config.isIncremental) DeepDiveLogDeltaDeriver.derive(parsedProgram)
-      else parsedProgram
-
+      // derive the program based on mode information
+      config.mode match {
+        case ORIGINAL => parsedProgram
+        case INCREMENTAL => DeepDiveLogDeltaDeriver.derive(parsedProgram)
+        case MATERIALIZATION => parsedProgram
+        case MERGE => DeepDiveLogMergeDeriver.derive(parsedProgram)
+      }
     // pretty print in original syntax
     programToPrint foreach {stmt => println(print(stmt))}
   }
diff --git a/test/expected-output-test.bats.template b/test/expected-output-test.bats.template
index 59be8b438..d37846df3 100644
--- a/test/expected-output-test.bats.template
+++
b/test/expected-output-test.bats.template
@@ -74,3 +74,25 @@ setup() {
   ddlog print --incremental "$TESTDIR"/input.ddl >"$actualOutput"
   diff "$expectedOutput" "$actualOutput"
 }
+
+## tests for --merge support
+
+# compare the compiled output of the merge version with what's expected
+@test "$it compiles --merge input as expected" {
+    expectedOutput=$TESTDIR/compile-merge.expected
+    actualOutput=${expectedOutput%.expected}.actual
+    [ -e "$expectedOutput" ] || skip
+    ddlog compile --merge "$TESTDIR"/input.ddl >"$actualOutput"
+    diff "$expectedOutput" "$actualOutput"
+}
+
+## tests for --materialization support
+
+# compare the compiled output of the materialization version with what's expected
+@test "$it compiles --materialization input as expected" {
+    expectedOutput=$TESTDIR/compile-materialization.expected
+    actualOutput=${expectedOutput%.expected}.actual
+    [ -e "$expectedOutput" ] || skip
+    ddlog compile --materialization "$TESTDIR"/input.ddl >"$actualOutput"
+    diff "$expectedOutput" "$actualOutput"
+}
diff --git a/test/expected-output-test/spouse_example/compile-materialization.expected b/test/expected-output-test/spouse_example/compile-materialization.expected
new file mode 100644
index 000000000..050204c9b
--- /dev/null
+++ b/test/expected-output-test/spouse_example/compile-materialization.expected
@@ -0,0 +1,180 @@
+
+  deepdive.db.default {
+    driver: "org.postgresql.Driver"
+    url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME}
+    user: ${PGUSER}
+    password: ${PGPASSWORD}
+    dbname: ${DBNAME}
+    host: ${PGHOST}
+    port: ${PGPORT}
+  }
+
+
+  deepdive.schema.variables {
+    has_spouse.label: Boolean
+  }
+
+
+  deepdive.extraction.extractors.extraction_rule_1 {
+    sql: """ DROP TABLE IF EXISTS sentences CASCADE;
+    CREATE TABLE
+    sentences(document_id text,
+              sentence text,
+              words text[],
+              lemma text[],
+              pos_tags text[],
+              dependencies text[],
+              ner_tags text[],
+              sentence_offset int,
+              sentence_id text,
+              dd_count int)
+    """
+    style: "sql_extractor"
+  }
+
+  deepdive.extraction.extractors.extraction_rule_3 {
+    sql: """ DROP TABLE IF EXISTS has_spouse_candidates CASCADE;
+    CREATE TABLE
+    has_spouse_candidates(person1_id text,
+                          person2_id text,
+                          sentence_id text,
+                          description text,
+                          relation_id text,
+                          is_true boolean,
+                          dd_count int)
+    """
+    style: "sql_extractor"
+  }
+
+  deepdive.extraction.extractors.extraction_rule_5 {
+    sql: """ DROP TABLE IF EXISTS has_spouse CASCADE;
+    CREATE TABLE
+    has_spouse(relation_id text)
+    """
+    style: "sql_extractor"
+  }
+
+  deepdive.extraction.extractors.extraction_rule_0 {
+    sql: """ DROP TABLE IF EXISTS articles CASCADE;
+    CREATE TABLE
+    articles(article_id text,
+             text text,
+             dd_count int)
+    """
+    style: "sql_extractor"
+  }
+
+  deepdive.extraction.extractors.extraction_rule_4 {
+    sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE;
+    CREATE TABLE
+    has_spouse_features(relation_id text,
+                        feature text,
+                        dd_count int)
+    """
+    style: "sql_extractor"
+  }
+
+  deepdive.extraction.extractors.extraction_rule_2 {
+    sql: """ DROP TABLE IF EXISTS people_mentions CASCADE;
+    CREATE TABLE
+    people_mentions(sentence_id text,
+                    start_position int,
+                    length int,
+                    text text,
+                    mention_id text,
+                    dd_count int)
+    """
+    style: "sql_extractor"
+  }
+
+  deepdive.extraction.extractors.extraction_rule_15 {
+    sql: """ TRUNCATE has_spouse;
+    INSERT INTO has_spouse SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count AS dd_count
+    FROM has_spouse_candidates R0
+
+
+    """
+    style: "sql_extractor"
+    dependencies: [ "extraction_rule_9" ]
+  }
+
+
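The dd_count column that materialization mode adds to every table implements multiset semantics: each row carries a multiplicity, and the generated queries propagate it. Joined rows multiply the counts of their inputs, exactly as the R0.dd_count * R1.dd_count * ... expressions in the views below, while the --merge output shown later folds counts back with SUM and deletes rows whose net count reaches zero. A minimal sketch of those two operations, assuming a simplified row representation (Row and both helpers are illustrative, not part of the compiler):

    case class Row(key: List[String], ddCount: Int)

    // Multiplicity of a row produced by joining several input rows:
    // the product of the input multiplicities.
    def joinCount(inputs: Seq[Row]): Int = inputs.map(_.ddCount).product

    // Merge semantics: sum the multiplicities per distinct row, then
    // drop rows whose net count is zero.
    def merge(rows: Seq[Row]): Seq[Row] =
      rows.groupBy(_.key)
          .map { case (k, rs) => Row(k, rs.map(_.ddCount).sum) }
          .toSeq
          .filter(_.ddCount != 0)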
deepdive.extraction.extractors.extraction_rule_7 { + sql: """ DROP VIEW IF EXISTS ext_people_input; + CREATE VIEW ext_people_input AS + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" , R0.dd_count AS "dd_count" + FROM sentences R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; + CREATE VIEW ext_has_spouse_features_input AS + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_9" , "extraction_rule_6" ] + } + + + deepdive.extraction.extractors.extraction_rule_10 { + sql: """ DROP VIEW IF EXISTS ext_has_spouse_input; + CREATE VIEW ext_has_spouse_input AS + SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM people_mentions R0, people_mentions R1 + WHERE R1.sentence_id = R0.sentence_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_6" ] + } + + + deepdive.extraction.extractors.extraction_rule_6 { + input: """ SELECT * FROM ext_people_input + """ + output_relation: "people_mentions" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_7" ] + } + + + deepdive.extraction.extractors.extraction_rule_12 { + input: """ SELECT * FROM ext_has_spouse_features_input + """ + output_relation: "has_spouse_features" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_13" ] + } + + + deepdive.extraction.extractors.extraction_rule_9 { + input: """ SELECT * FROM ext_has_spouse_input + """ + output_relation: "has_spouse_candidates" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_10" ] + } + + + deepdive.inference.factors.factor_has_spouse_16 { + input_query: """ + SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + function: "Imply(has_spouse.R0.label)" + weight: "?(has_spouse_features.R2.feature)" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_3, extraction_rule_5, extraction_rule_0, extraction_rule_4, extraction_rule_2] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_10, 
extraction_rule_13, extraction_rule_9, extraction_rule_15, extraction_rule_6]
+deepdive.pipeline.pipelines.inference: [factor_has_spouse_16]
diff --git a/test/expected-output-test/spouse_example/compile-merge.expected b/test/expected-output-test/spouse_example/compile-merge.expected
new file mode 100644
index 000000000..7a071a982
--- /dev/null
+++ b/test/expected-output-test/spouse_example/compile-merge.expected
@@ -0,0 +1,83 @@
+
+  deepdive.db.default {
+    driver: "org.postgresql.Driver"
+    url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME}
+    user: ${PGUSER}
+    password: ${PGPASSWORD}
+    dbname: ${DBNAME}
+    host: ${PGHOST}
+    port: ${PGPORT}
+  }
+
+
+  deepdive.schema.variables {
+
+  }
+
+
+  deepdive.extraction.extractors.extraction_rule_1 {
+    sql: """ TRUNCATE sentences;
+    INSERT INTO sentences
+    SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, SUM(R0.dd_count)
+    FROM dd_new_sentences R0
+    GROUP BY R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id;
+    DELETE FROM sentences WHERE dd_count = 0;
+    """
+    style: "sql_extractor"
+
+  }
+
+
+  deepdive.extraction.extractors.extraction_rule_3 {
+    sql: """ TRUNCATE has_spouse_candidates;
+    INSERT INTO has_spouse_candidates
+    SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, SUM(R0.dd_count)
+    FROM dd_new_has_spouse_candidates R0
+    GROUP BY R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true;
+    DELETE FROM has_spouse_candidates WHERE dd_count = 0;
+    """
+    style: "sql_extractor"
+
+  }
+
+
+  deepdive.extraction.extractors.extraction_rule_0 {
+    sql: """ TRUNCATE articles;
+    INSERT INTO articles
+    SELECT R0.article_id, R0.text, SUM(R0.dd_count)
+    FROM dd_new_articles R0
+    GROUP BY R0.article_id, R0.text;
+    DELETE FROM articles WHERE dd_count = 0;
+    """
+    style: "sql_extractor"
+
+  }
+
+
+  deepdive.extraction.extractors.extraction_rule_4 {
+    sql: """ TRUNCATE has_spouse_features;
+    INSERT INTO has_spouse_features
+    SELECT R0.relation_id, R0.feature, SUM(R0.dd_count)
+    FROM dd_new_has_spouse_features R0
+    GROUP BY R0.relation_id, R0.feature;
+    DELETE FROM has_spouse_features WHERE dd_count = 0;
+    """
+    style: "sql_extractor"
+
+  }
+
+
+  deepdive.extraction.extractors.extraction_rule_2 {
+    sql: """ TRUNCATE people_mentions;
+    INSERT INTO people_mentions
+    SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, SUM(R0.dd_count)
+    FROM dd_new_people_mentions R0
+    GROUP BY R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id;
+    DELETE FROM people_mentions WHERE dd_count = 0;
+    """
+    style: "sql_extractor"
+
+  }
+
+deepdive.pipeline.run: ${PIPELINE}
+deepdive.pipeline.pipelines.extraction: [extraction_rule_0, extraction_rule_3, extraction_rule_4, extraction_rule_2, extraction_rule_1]

From 1801175d244e15b23474f0a9421dc118261d43bd Mon Sep 17 00:00:00 2001
From: senwu
Date: Thu, 28 May 2015 12:17:00 -0700
Subject: [PATCH 119/347] support incremental function

add an argument in function declaration 'mode = inc' to specify an
incremental function
---
 .../compile-incremental.expected              | 417 ++++++++++++++++++
 .../spouse_example_new_feature/input.ddl      |  79 ++++
 .../print-incremental.expected                | 178 ++++++++
 3 files changed, 674 insertions(+)
 create mode 100644 test/expected-output-test/spouse_example_new_feature/compile-incremental.expected
 create mode 100644
test/expected-output-test/spouse_example_new_feature/input.ddl create mode 100644 test/expected-output-test/spouse_example_new_feature/print-incremental.expected diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected new file mode 100644 index 000000000..7a544e25a --- /dev/null +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -0,0 +1,417 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + } + + + deepdive.schema.variables { + dd_delta_has_spouse.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS sentences CASCADE; + CREATE TABLE + sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; + CREATE TABLE + dd_delta_articles(article_id text, + text text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_12 { + sql: """ DROP TABLE IF EXISTS has_spouse_candidates CASCADE; + CREATE TABLE + has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_10 { + sql: """ DROP TABLE IF EXISTS dd_new_people_mentions CASCADE; + CREATE TABLE + dd_new_people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_20 { + sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; + CREATE TABLE + has_spouse(relation_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_17 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; + CREATE TABLE + dd_delta_has_spouse_features(relation_id text, + feature text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS dd_new_articles CASCADE; + CREATE TABLE + dd_new_articles(article_id text, + text text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS articles CASCADE; + CREATE TABLE + articles(article_id text, + text text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_14 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_candidates CASCADE; + CREATE TABLE + dd_new_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_18 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_features CASCADE; + CREATE TABLE + dd_new_has_spouse_features(relation_id text, + feature text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_6 { + sql: 
""" DROP TABLE IF EXISTS dd_new_sentences CASCADE; + CREATE TABLE + dd_new_sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_9 { + sql: """ DROP TABLE IF EXISTS dd_delta_people_mentions CASCADE; + CREATE TABLE + dd_delta_people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_candidates CASCADE; + CREATE TABLE + dd_delta_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_16 { + sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE; + CREATE TABLE + has_spouse_features(relation_id text, + feature text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS dd_delta_sentences CASCADE; + CREATE TABLE + dd_delta_sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_8 { + sql: """ DROP TABLE IF EXISTS people_mentions CASCADE; + CREATE TABLE + people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_21 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE; + CREATE TABLE + dd_delta_has_spouse(relation_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_22 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse CASCADE; + CREATE TABLE + dd_new_has_spouse(relation_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_30 { + sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; + CREATE VIEW dd_delta_ext_has_spouse_features_input AS + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, 
people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_23" , "extraction_rule_15" , "extraction_rule_26" , "extraction_rule_7" , "extraction_rule_11" ] + } + + + deepdive.extraction.extractors.extraction_rule_11 { + sql: """ TRUNCATE dd_new_people_mentions; + INSERT INTO dd_new_people_mentions + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + FROM people_mentions R0 + UNION + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + FROM dd_delta_people_mentions R0 + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_23" ] + } + + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ TRUNCATE dd_new_articles; + INSERT INTO dd_new_articles + SELECT R0.article_id, R0.text, R0.dd_count + FROM articles R0 + UNION + SELECT R0.article_id, R0.text, R0.dd_count + FROM dd_delta_articles R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_24 { + sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; + CREATE VIEW dd_delta_ext_people_input AS + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" , R0.dd_count AS "dd_count" + FROM sentences R0 + UNION + SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" + FROM dd_delta_sentences R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_15 { + sql: """ TRUNCATE dd_new_has_spouse_candidates; + INSERT INTO dd_new_has_spouse_candidates + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + FROM has_spouse_candidates R0 + UNION + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, 
R0.description, R0.relation_id, R0.is_true, R0.dd_count + FROM dd_delta_has_spouse_candidates R0 + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_26" ] + } + + + deepdive.extraction.extractors.extraction_rule_19 { + sql: """ TRUNCATE dd_new_has_spouse_features; + INSERT INTO dd_new_has_spouse_features + SELECT R0.relation_id, R0.feature, R0.dd_count + FROM has_spouse_features R0 + UNION + SELECT R0.relation_id, R0.feature, R0.dd_count + FROM dd_delta_has_spouse_features R0 + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_29" ] + } + + + deepdive.extraction.extractors.extraction_rule_7 { + sql: """ TRUNCATE dd_new_sentences; + INSERT INTO dd_new_sentences + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + FROM sentences R0 + UNION + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + FROM dd_delta_sentences R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_27 { + sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; + CREATE VIEW dd_delta_ext_has_spouse_input AS + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM dd_delta_people_mentions R0, people_mentions R1 + WHERE R1.sentence_id = R0.sentence_id UNION + SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 + WHERE R1.sentence_id = R0.sentence_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_23" , "extraction_rule_11" ] + } + + + deepdive.extraction.extractors.extraction_rule_32 { + sql: """ TRUNCATE dd_delta_has_spouse; + INSERT INTO dd_delta_has_spouse SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count AS dd_count + FROM dd_delta_has_spouse_candidates R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_26" ] + } + + + deepdive.extraction.extractors.extraction_rule_23 { + input: """ SELECT * FROM dd_delta_ext_people_input + """ + output_relation: "dd_delta_people_mentions" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_24" ] + } + + + deepdive.extraction.extractors.extraction_rule_29 { + input: """ SELECT * FROM dd_delta_ext_has_spouse_features_input + """ + output_relation: "dd_delta_has_spouse_features" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_30" ] + } + + + deepdive.extraction.extractors.extraction_rule_26 { + input: """ SELECT * FROM dd_delta_ext_has_spouse_input + """ + output_relation: "dd_delta_has_spouse_candidates" + udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_27" ] + } + + + 
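The four-way UNION in dd_delta_ext_has_spouse_features_input above is the standard incremental-join rewrite: for a rule body with atoms A1, ..., An, the delta is the union of n variants in which the i-th atom is replaced by its dd_delta_ table, every atom before it by its dd_new_ snapshot, and every atom after it stays the original relation. A minimal sketch over plain relation names (the function is illustrative, not the compiler's code):

    // Derive the delta bodies for one rule body, given as relation names.
    def deltaBodies(body: List[String]): List[List[String]] =
      body.indices.toList.map { i =>
        body.zipWithIndex.map {
          case (atom, j) if j < i  => "dd_new_" + atom
          case (atom, j) if j == i => "dd_delta_" + atom
          case (atom, _)           => atom
        }
      }

For example, deltaBodies(List("sentences", "has_spouse_candidates", "people_mentions", "people_mentions")) yields the four atom lists joined by the four SELECTs above.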
deepdive.inference.factors.factor_dd_delta_has_spouse_33 { + input_query: """ + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + function: "Imply(dd_delta_has_spouse.R0.label)" + weight: "?(has_spouse_features.R2.feature)" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_4, extraction_rule_1, extraction_rule_12, extraction_rule_10, extraction_rule_20, extraction_rule_17, extraction_rule_2, extraction_rule_0, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_16, extraction_rule_5, extraction_rule_8, extraction_rule_21, extraction_rule_22] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_32, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15] +deepdive.pipeline.pipelines.inference: [factor_dd_delta_has_spouse_33] diff --git a/test/expected-output-test/spouse_example_new_feature/input.ddl b/test/expected-output-test/spouse_example_new_feature/input.ddl new file mode 100644 index 000000000..0b22fabfc --- /dev/null +++ b/test/expected-output-test/spouse_example_new_feature/input.ddl @@ -0,0 +1,79 @@ +articles( + article_id text, + text text). + +sentences( + document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text). + +people_mentions( + sentence_id text, + start_position int, + length int, + text text, + mention_id text). + +has_spouse_candidates( + person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). + +has_spouse_features( + relation_id text, + feature text). + +has_spouse?(relation_id text). + +people_mentions :- + !ext_people(ext_people_input). + +ext_people_input(s, words, ner_tags) :- + sentences(a, b, words, c, d, e, ner_tags, f, s). + +function ext_people over like ext_people_input + returns like people_mentions + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines + mode = inc. + +has_spouse_candidates :- + !ext_has_spouse(ext_has_spouse_input). + +ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + people_mentions(s, a, b, p1_text, p1_id), + people_mentions(s, c, d, p2_text, p2_id). + +function ext_has_spouse over like ext_has_spouse_input + returns like has_spouse_candidates + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. + +has_spouse_features :- + !ext_has_spouse_features(ext_has_spouse_features_input). 
+ +ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +function ext_has_spouse_features over like ext_has_spouse_features_input + returns like has_spouse_features + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. + +has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l. + +has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), + has_spouse_features(rid, f) +weight = f +label = l +semantics = imply. diff --git a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected new file mode 100644 index 000000000..0e3b00bc6 --- /dev/null +++ b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected @@ -0,0 +1,178 @@ +articles(article_id text, + text text). + +dd_delta_articles(article_id text, + text text). + +dd_new_articles(article_id text, + text text). + +dd_new_articles(article_id, text) :- + articles(article_id, text); + dd_delta_articles(article_id, text). + +sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text). + +dd_delta_sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text). + +dd_new_sentences(document_id text, + sentence text, + words text[], + lemma text[], + pos_tags text[], + dependencies text[], + ner_tags text[], + sentence_offset int, + sentence_id text). + +dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) :- + sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id); + dd_delta_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id). + +people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text). + +dd_delta_people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text). + +dd_new_people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text). + +dd_new_people_mentions(sentence_id, start_position, length, text, mention_id) :- + people_mentions(sentence_id, start_position, length, text, mention_id); + dd_delta_people_mentions(sentence_id, start_position, length, text, mention_id). + +has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). + +dd_delta_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). + +dd_new_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). 
+ +dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true) :- + has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true); + dd_delta_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true). + +has_spouse_features(relation_id text, + feature text). + +dd_delta_has_spouse_features(relation_id text, + feature text). + +dd_new_has_spouse_features(relation_id text, + feature text). + +dd_new_has_spouse_features(relation_id, feature) :- + has_spouse_features(relation_id, feature); + dd_delta_has_spouse_features(relation_id, feature). + +has_spouse?(relation_id text). + +dd_delta_has_spouse?(relation_id text). + +dd_new_has_spouse?(relation_id text). + +dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). + +dd_delta_ext_people_input(s, words, ner_tags) :- + sentences(a, b, words, c, d, e, ner_tags, f, s); + dd_delta_sentences(a, b, words, c, d, e, ner_tags, f, s). + +function ext_people + over like dd_delta_ext_people_input + returns like dd_delta_people_mentions + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + handles tsv lines mode = inc. + +dd_delta_has_spouse_candidates :- !ext_has_spouse(dd_delta_ext_has_spouse_input). + +dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + dd_delta_people_mentions(s, a, b, p1_text, p1_id), + people_mentions(s, c, d, p2_text, p2_id); + dd_new_people_mentions(s, a, b, p1_text, p1_id), + dd_delta_people_mentions(s, c, d, p2_text, p2_id). + +function ext_has_spouse + over like dd_delta_ext_has_spouse_input + returns like dd_delta_has_spouse_candidates + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + handles tsv lines. + +dd_delta_has_spouse_features :- !ext_has_spouse_features(dd_delta_ext_has_spouse_features_input). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + dd_delta_sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id); + dd_new_sentences(a, b, words, c, d, e, f, g, s), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id); + dd_new_sentences(a, b, words, c, d, e, f, g, s), + dd_new_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id); + dd_new_sentences(a, b, words, c, d, e, f, g, s), + dd_new_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_new_people_mentions(s, p1idx, p1len, k, person1_id), + dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). + +function ext_has_spouse_features + over like dd_delta_ext_has_spouse_features_input + returns like dd_delta_has_spouse_features + implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + handles tsv lines. + +dd_delta_has_spouse(rid) :- + dd_delta_has_spouse_candidates(a, b, c, d, rid, l) + label = l. + +dd_delta_has_spouse(rid) :- + dd_delta_has_spouse_candidates(a, b, c, d, rid, l), + has_spouse_features(rid, f); + dd_new_has_spouse_candidates(a, b, c, d, rid, l), + dd_delta_has_spouse_features(rid, f) + weight = f + label = l + semantics = imply. 
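The extra all-original body in dd_delta_ext_people_input above (sentences(...) alongside dd_delta_sentences(...)) is what `mode = inc` buys: a function declared incremental receives the full original relation in addition to the delta rewrites, so a UDF that keeps its own incremental state can see the complete input. A minimal sketch of that choice, reusing the delta rewrite from the earlier note (names are illustrative, and consistent with the expected outputs above rather than taken from the compiler):

    // Input bodies for a function call rule: ordinary functions get only the
    // delta rewrites; a "mode = inc" function also gets the original body.
    def inputBodies(body: List[String], isIncremental: Boolean): List[List[String]] = {
      val deltas = body.indices.toList.map { i =>
        body.zipWithIndex.map {
          case (atom, j) if j < i  => "dd_new_" + atom
          case (atom, j) if j == i => "dd_delta_" + atom
          case (atom, _)           => atom
        }
      }
      if (isIncremental) body :: deltas else deltas
    }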
+ From 1d2a6e0c95175018e71b4248ef1bf998ddd9b6b9 Mon Sep 17 00:00:00 2001 From: senwu Date: Thu, 28 May 2015 12:39:51 -0700 Subject: [PATCH 120/347] add modified files for support incremental function --- .../ddlog/DeepDiveLogDeltaDeriver.scala | 20 ++++++++++++++++++- .../deepdive/ddlog/DeepDiveLogParser.scala | 16 +++++++-------- .../ddlog/DeepDiveLogPrettyPrinter.scala | 3 ++- 3 files changed, 29 insertions(+), 10 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index b6af1da2d..db3964570 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -8,6 +8,8 @@ object DeepDiveLogDeltaDeriver{ val deltaPrefix = "dd_delta_" val newPrefix = "dd_new_" + var incrementalFunctionInput = new ListBuffer[String]() + def transform(stmt: Statement): List[Statement] = stmt match { case s: SchemaDeclaration => transform(s) case s: FunctionDeclaration => transform(s) @@ -41,7 +43,8 @@ object DeepDiveLogDeltaDeriver{ } var i = 0 var j = 0 - for (i <- 0 to (body.length - 1)) { + var index = if (incrementalFunctionInput contains incCqHead.name) -1 else 0 + for (i <- index to (body.length - 1)) { var newBody = new ListBuffer[Atom]() for (j <- 0 to (body.length - 1)) { if (j > i) @@ -128,8 +131,23 @@ object DeepDiveLogDeltaDeriver{ List(InferenceRule(transform(stmt.q), stmt.weights, stmt.supervision, stmt.semantics)) } + def generateIncrementalFunctionInputList(program: DeepDiveLog.Program) { + program.foreach { + case x:FunctionDeclaration => if (x.mode == "inc") { + x.inputType match { + case inTy: RelationTypeAlias => incrementalFunctionInput += deltaPrefix + inTy.likeRelationName + case _ => + } + } + case _ => + } + } + def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = { var incrementalProgram = new ListBuffer[Statement]() + + generateIncrementalFunctionInputList(program) + for (x <- program) { incrementalProgram = incrementalProgram ++ transform(x) } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index 4cc5284c4..dfd1ef9be 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -5,6 +5,7 @@ package org.deepdive.ddlog import scala.util.parsing.combinator._ import org.apache.commons.lang3.StringEscapeUtils +import scala.util.Try // *************************************** // * The union types for for the parser. * @@ -35,7 +36,7 @@ case class RowWiseLineHandler(format: String, command: String) extends FunctionI // Statements that will be parsed and compiled trait Statement case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statement // atom and whether this is a query relation. 
-case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration]) extends Statement +case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration], mode: String = null) extends Statement case class ExtractionRule(q : ConjunctiveQuery, supervision: String = null) extends Statement // Extraction rule case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String, semantics : String = "Imply") extends Statement // Weighted rule @@ -65,6 +66,7 @@ class DeepDiveLogParser extends JavaTokenParsers { def variableName = ident def functionName = ident def semanticType = ident + def functionModeType = ident def columnDeclaration: Parser[Column] = columnName ~ columnType ^^ { @@ -103,10 +105,6 @@ class DeepDiveLogParser extends JavaTokenParsers { def conjunctiveQuery : Parser[ConjunctiveQuery] = cqHead ~ ":-" ~ rep1sep(cqBody, ";") ^^ { case (headatom ~ ":-" ~ disjunctiveBodies) => - // TODO handle all disjunctiveBodies - // XXX only compiling the first body - // val bodyatoms = disjunctiveBodies(0) - // ConjunctiveQuery(headatom, bodyatoms.toList) ConjunctiveQuery(headatom, disjunctiveBodies) } @@ -117,6 +115,8 @@ class DeepDiveLogParser extends JavaTokenParsers { } ) + def functionMode = "mode" ~> "=" ~> functionModeType + def functionImplementation : Parser[FunctionImplementationDeclaration] = "implementation" ~ stringLiteralAsString ~ "handles" ~ ("tsv" | "json") ~ "lines" ^^ { case (_ ~ command ~ _ ~ format ~ _) => RowWiseLineHandler(command=command, format=format) @@ -125,12 +125,12 @@ class DeepDiveLogParser extends JavaTokenParsers { def functionDeclaration : Parser[FunctionDeclaration] = ( "function" ~ functionName ~ "over" ~ relationType ~ "returns" ~ relationType - ~ (functionImplementation+) + ~ (functionImplementation+) ~ opt(functionMode) ) ^^ { case ("function" ~ a ~ "over" ~ inTy ~ "returns" ~ outTy - ~ implementationDecls) => - FunctionDeclaration(a, inTy, outTy, implementationDecls) + ~ implementationDecls ~ mode) => + FunctionDeclaration(a, inTy, outTy, implementationDecls, mode.getOrElse(null)) } def extractionRule : Parser[ExtractionRule] = diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index d9054c65c..56f7249bb 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -40,10 +40,11 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { "\"" + StringEscapeUtils.escapeJava(impl.command) + "\"" + s"\n handles ${impl.format} lines" } + val modeStr = if (stmt.mode == null) "" else s" mode = ${stmt.mode}" s"""function ${stmt.functionName} | over ${inputType} | returns ${outputType} - | ${(impls map {"implementation " + _}).mkString("\n ")}. + | ${(impls map {"implementation " + _}).mkString("\n ")}${modeStr}. 
|""".stripMargin } From 9d65baa4571ad07b6b999f66e90c33823fc4b842 Mon Sep 17 00:00:00 2001 From: senwu Date: Thu, 28 May 2015 15:47:31 -0700 Subject: [PATCH 121/347] add cleanup incremental tables in INCREMENTAL mode for supporting adding new data --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 21 +++++++++++++++---- .../compile-incremental.expected | 19 +++++++++++++++++ .../compile-incremental.expected | 19 +++++++++++++++++ 3 files changed, 55 insertions(+), 4 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index db85a8e66..77ef7c509 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -241,7 +241,6 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C def analyzeVisible(statements: List[Statement]) = { extractionRuleGroupByHead foreach {keyVal => visible += keyVal._2(0)} functionCallRuleGroupByInput foreach {keyVal => visible += keyVal._2(0)} - // inferenceRuleGroupByHead foreach {keyVal => visible += keyVal._2(0)} } // Analyze the dependency between statements and construct a graph. @@ -303,7 +302,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { type CompiledBlock = String type CompiledBlocks = List[CompiledBlock] - // Generate schema for database + // Generate schema and cleanup part for database def compileSchemaDeclarations(stmts: List[SchemaDeclaration], ss: CompilationState): CompiledBlocks = { var schemas = new ListBuffer[String]() for (stmt <- stmts) { @@ -322,6 +321,17 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { style: "sql_extractor" }""" } + // Cleanup incremental table extractor + val truncateTableList = (stmts map (x => if (x.a.name.startsWith("dd_")) s"TRUNCATE ${x.a.name};" else "")).filter(_ != "") + if (truncateTableList.length > 0) { + schemas += s""" + deepdive.extraction.extractors.cleanup { + sql: \"\"\" + ${truncateTableList.mkString("\n ")} + \"\"\" + style: "sql_extractor" + }""" + } schemas.toList } @@ -520,8 +530,11 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val extraction_pipeline = if (extraction.length > 0) s"deepdive.pipeline.pipelines.extraction: [${extraction}]" else "" val inference = ((ss.inferenceRuleGroupByHead map (_._2)).flatten map {s => ss.resolveInferenceBlockName(s)}).mkString(", ") val inference_pipeline = if (inference.length > 0) s"deepdive.pipeline.pipelines.inference: [${inference}]" else "" - - List(run, initdb, extraction_pipeline, inference_pipeline).filter(_ != "") + val cleanup_pipeline = ss.mode match { + case INCREMENTAL => if (setup_database_pipeline.length > 0) s"deepdive.pipeline.pipelines.cleanup: [cleanup]" else "" + case _ => "" + } + List(run, initdb, extraction_pipeline, inference_pipeline, cleanup_pipeline).filter(_ != "") } // generate variable schema statements diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected index 78d7547de..e8292ef3e 100644 --- a/test/expected-output-test/spouse_example/compile-incremental.expected +++ b/test/expected-output-test/spouse_example/compile-incremental.expected @@ -231,6 +231,24 @@ style: "sql_extractor" } + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE dd_delta_articles; + TRUNCATE dd_new_people_mentions; + TRUNCATE dd_delta_has_spouse_features; + TRUNCATE dd_new_articles; + TRUNCATE 
dd_new_has_spouse_candidates; + TRUNCATE dd_new_has_spouse_features; + TRUNCATE dd_new_sentences; + TRUNCATE dd_delta_people_mentions; + TRUNCATE dd_delta_has_spouse_candidates; + TRUNCATE dd_delta_sentences; + TRUNCATE dd_delta_has_spouse; + TRUNCATE dd_new_has_spouse; + """ + style: "sql_extractor" + } + deepdive.extraction.extractors.extraction_rule_30 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS @@ -412,3 +430,4 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_4, extraction_rule_1, extraction_rule_12, extraction_rule_10, extraction_rule_20, extraction_rule_17, extraction_rule_2, extraction_rule_0, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_16, extraction_rule_5, extraction_rule_8, extraction_rule_21, extraction_rule_22] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_32, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15] deepdive.pipeline.pipelines.inference: [factor_dd_delta_has_spouse_33] +deepdive.pipeline.pipelines.cleanup: [cleanup] diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index 7a544e25a..2febacca8 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -231,6 +231,24 @@ style: "sql_extractor" } + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE dd_delta_articles; + TRUNCATE dd_new_people_mentions; + TRUNCATE dd_delta_has_spouse_features; + TRUNCATE dd_new_articles; + TRUNCATE dd_new_has_spouse_candidates; + TRUNCATE dd_new_has_spouse_features; + TRUNCATE dd_new_sentences; + TRUNCATE dd_delta_people_mentions; + TRUNCATE dd_delta_has_spouse_candidates; + TRUNCATE dd_delta_sentences; + TRUNCATE dd_delta_has_spouse; + TRUNCATE dd_new_has_spouse; + """ + style: "sql_extractor" + } + deepdive.extraction.extractors.extraction_rule_30 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS @@ -415,3 +433,4 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_4, extraction_rule_1, extraction_rule_12, extraction_rule_10, extraction_rule_20, extraction_rule_17, extraction_rule_2, extraction_rule_0, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_16, extraction_rule_5, extraction_rule_8, extraction_rule_21, extraction_rule_22] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_32, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15] deepdive.pipeline.pipelines.inference: [factor_dd_delta_has_spouse_33] +deepdive.pipeline.pipelines.cleanup: [cleanup] From 28de580690615c7efdc7288eb09070ef846507f6 Mon Sep 17 00:00:00 2001 From: senwu Date: Thu, 28 May 2015 16:20:05 -0700 Subject: [PATCH 122/347] fix bugs in creating variable table schema: add id, label, (dd_count) column, reorder column order(move user specify column 
to the first column instead of the id column. GreenPlum will distribute by the
first column, which might cause an issue when using the id column)
---
 .../deepdive/ddlog/DeepDiveLogCompiler.scala      |  5 +++--
 .../spouse_example/compile-incremental.expected   | 17 +++++++++++++----
 .../compile-materialization.expected              |  7 +++++--
 .../spouse_example/compile.expected               |  6 ++++--
 .../compile-incremental.expected                  | 17 +++++++++++++----
 5 files changed, 38 insertions(+), 14 deletions(-)

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index 77ef7c509..596b54adb 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -309,7 +309,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
     var columnDecls = stmt.a.terms map {
       case Variable(name, _, i) => s"${name} ${stmt.a.types(i)}"
     }
-    if (ss.useDeltaCount && !stmt.isQuery) columnDecls = columnDecls :+ "dd_count int"
+    if (stmt.isQuery) columnDecls = columnDecls :+ "id bigint" :+ "label boolean"
+    if (ss.useDeltaCount) columnDecls = columnDecls :+ "dd_count int"
     val indentation = " " * stmt.a.name.length
     val blockName = ss.resolveExtractorBlockName(stmt)
     schemas += s"""
@@ -352,7 +352,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
       val index = qs.getBodyIndex(stmt.supervision)
       val name = ss.resolveName(qs.getVar(stmt.supervision))
       val labelCol = s"R${index}.${name}"
-      val headTermsStr = ( "0 as id" :: headTerms ).mkString(", ")
+      val headTermsStr = ( headTerms :+ "0 as id" ).mkString(", ")
       val ddCount = if (ss.useDeltaCount) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else ""
       val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else ""
       inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr}
diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected
index e8292ef3e..8bfcb6bfb 100644
--- a/test/expected-output-test/spouse_example/compile-incremental.expected
+++ b/test/expected-output-test/spouse_example/compile-incremental.expected
@@ -72,7 +72,10 @@
   deepdive.extraction.extractors.extraction_rule_20 {
     sql: """ DROP TABLE IF EXISTS has_spouse CASCADE;
     CREATE TABLE
-    has_spouse(relation_id text)
+    has_spouse(relation_id text,
+               id bigint,
+               label boolean,
+               dd_count int)
     """
     style: "sql_extractor"
   }
@@ -218,7 +221,10 @@
   deepdive.extraction.extractors.extraction_rule_21 {
     sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE;
     CREATE TABLE
-    dd_delta_has_spouse(relation_id text)
+    dd_delta_has_spouse(relation_id text,
+                        id bigint,
+                        label boolean,
+                        dd_count int)
     """
     style: "sql_extractor"
   }
@@ -226,7 +232,10 @@
   deepdive.extraction.extractors.extraction_rule_22 {
     sql: """ DROP TABLE IF EXISTS dd_new_has_spouse CASCADE;
     CREATE TABLE
-    dd_new_has_spouse(relation_id text)
+    dd_new_has_spouse(relation_id text,
+                      id bigint,
+                      label boolean,
+                      dd_count int)
     """
     style: "sql_extractor"
   }
@@ -374,7 +383,7 @@
   deepdive.extraction.extractors.extraction_rule_32 {
     sql: """ TRUNCATE dd_delta_has_spouse;
-    INSERT INTO dd_delta_has_spouse SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count AS dd_count
+    INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count
     FROM dd_delta_has_spouse_candidates R0
diff --git
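The reordering matters on GreenPlum, which by default distributes a table by its first column; putting a user column first keeps the reserved id column out of the distribution key. A sketch of the resulting column layout (illustrative helper, not the compiler's exact code):

    // Sketch: user columns first, reserved id/label columns for variable
    // (query) relations next, dd_count last when delta counting is on.
    def columnDecls(userCols: List[(String, String)],
                    isQuery: Boolean, useDeltaCount: Boolean): List[String] = {
      var decls = userCols map { case (name, ty) => s"$name $ty" }
      if (isQuery) decls = decls :+ "id bigint" :+ "label boolean"
      if (useDeltaCount) decls = decls :+ "dd_count int"
      decls
    }
    // columnDecls(List("relation_id" -> "text"), isQuery = true, useDeltaCount = true)
    // => relation_id text, id bigint, label boolean, dd_count int — matching the
    // has_spouse blocks in the expected output above.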
a/test/expected-output-test/spouse_example/compile-materialization.expected b/test/expected-output-test/spouse_example/compile-materialization.expected index 050204c9b..dffdaf9b1 100644 --- a/test/expected-output-test/spouse_example/compile-materialization.expected +++ b/test/expected-output-test/spouse_example/compile-materialization.expected @@ -49,7 +49,10 @@ deepdive.extraction.extractors.extraction_rule_5 { sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; CREATE TABLE - has_spouse(relation_id text) + has_spouse(relation_id text, + id bigint, + label boolean, + dd_count int) """ style: "sql_extractor" } @@ -89,7 +92,7 @@ deepdive.extraction.extractors.extraction_rule_15 { sql: """ TRUNCATE has_spouse; - INSERT INTO has_spouse SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count AS dd_count + INSERT INTO has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count FROM has_spouse_candidates R0 diff --git a/test/expected-output-test/spouse_example/compile.expected b/test/expected-output-test/spouse_example/compile.expected index 1eee5f63e..92d41bd89 100644 --- a/test/expected-output-test/spouse_example/compile.expected +++ b/test/expected-output-test/spouse_example/compile.expected @@ -47,7 +47,9 @@ deepdive.extraction.extractors.extraction_rule_5 { sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; CREATE TABLE - has_spouse(relation_id text) + has_spouse(relation_id text, + id bigint, + label boolean) """ style: "sql_extractor" } @@ -84,7 +86,7 @@ deepdive.extraction.extractors.extraction_rule_15 { sql: """ TRUNCATE has_spouse; - INSERT INTO has_spouse SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label + INSERT INTO has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label FROM has_spouse_candidates R0 diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index 2febacca8..7c6d1e0c0 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -72,7 +72,10 @@ deepdive.extraction.extractors.extraction_rule_20 { sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; CREATE TABLE - has_spouse(relation_id text) + has_spouse(relation_id text, + id bigint, + label boolean, + dd_count int) """ style: "sql_extractor" } @@ -218,7 +221,10 @@ deepdive.extraction.extractors.extraction_rule_21 { sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE; CREATE TABLE - dd_delta_has_spouse(relation_id text) + dd_delta_has_spouse(relation_id text, + id bigint, + label boolean, + dd_count int) """ style: "sql_extractor" } @@ -226,7 +232,10 @@ deepdive.extraction.extractors.extraction_rule_22 { sql: """ DROP TABLE IF EXISTS dd_new_has_spouse CASCADE; CREATE TABLE - dd_new_has_spouse(relation_id text) + dd_new_has_spouse(relation_id text, + id bigint, + label boolean, + dd_count int) """ style: "sql_extractor" } @@ -377,7 +386,7 @@ deepdive.extraction.extractors.extraction_rule_32 { sql: """ TRUNCATE dd_delta_has_spouse; - INSERT INTO dd_delta_has_spouse SELECT DISTINCT 0 as id, R0.relation_id, R0.is_true AS label , R0.dd_count AS dd_count + INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count FROM dd_delta_has_spouse_candidates R0 From 04aafc0c3416e175dfe3b60378976be4f24edfc5 Mon Sep 17 00:00:00 2001 From: senwu Date: 
Thu, 28 May 2015 18:01:42 -0700
Subject: [PATCH 123/347] fix bugs: 1. remove creation of original tables in
 INCREMENTAL mode (we assume the first run uses MATERIALIZATION mode); 2.
 resolve whether the DDlog compiler needs to TRUNCATE tables in extractors
---
 .../deepdive/ddlog/DeepDiveLogCompiler.scala  | 17 +++-
 .../compile-incremental.expected              | 79 +------------------
 .../compile-materialization.expected          |  2 +-
 .../spouse_example/compile.expected           |  2 +-
 .../compile-incremental.expected              | 79 +------------------
 5 files changed, 20 insertions(+), 159 deletions(-)

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index 596b54adb..20f812063 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -230,7 +230,10 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
       case _ =>
     })
-    schemaDeclarationGroupByHead = schemaDeclarationToCompile.toList.groupBy(_.a.name)
+    schemaDeclarationGroupByHead = mode match {
+      case INCREMENTAL => schemaDeclarationToCompile.toList.filter(_.a.name.startsWith("dd_")).groupBy(_.a.name)
+      case _ => schemaDeclarationToCompile.toList.groupBy(_.a.name)
+    }
     extractionRuleGroupByHead = extractionRuleToCompile.toList.groupBy(_.q.head.name)
     inferenceRuleGroupByHead = inferenceRuleToCompile.toList.groupBy(_.q.head.name)
     functionCallRuleGroupByInput = functionCallRuleToCompile.toList.groupBy(_.input)
@@ -395,11 +398,19 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
       }
     }
     val blockName = ss.resolveExtractorBlockName(stmts(0))
+    // val createTable = if (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name) true else false
+    val sqlCmdForCleanUp = ss.mode match {
+      case MERGE => s"TRUNCATE ${stmts(0).q.head.name};"
+      case _ => if (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name) {
+        if (stmts(0).q.head.name.startsWith("dd_new_"))
+          s"TRUNCATE ${stmts(0).q.head.name};"
+        else ""
+      } else s"DROP VIEW IF EXISTS ${stmts(0).q.head.name};"
+    }
     val createTable = ss.mode match {
       case MERGE => true
       case _ => if (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name) true else false
     }
-    val sqlCmdForCleanUp = if (createTable) "TRUNCATE" else "DROP VIEW IF EXISTS"
     val sqlCmdForInsert = if (createTable) "INSERT INTO" else "CREATE VIEW"
     val useAS = if (createTable) "" else " AS"
     val cleanUp = ss.mode match {
@@ -409,7 +420,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
     }
     val extractor = s"""
     deepdive.extraction.extractors.${blockName} {
-      sql: \"\"\" ${sqlCmdForCleanUp} ${stmts(0).q.head.name};
+      sql: \"\"\" ${sqlCmdForCleanUp}
      ${sqlCmdForInsert} ${stmts(0).q.head.name}${useAS} ${inputQueries.mkString(" UNION ")}${cleanUp}
      \"\"\"
      style: "sql_extractor"
diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected
index 8bfcb6bfb..a3d44b221 100644
--- a/test/expected-output-test/spouse_example/compile-incremental.expected
+++ b/test/expected-output-test/spouse_example/compile-incremental.expected
@@ -15,23 +15,6 @@
   }

-  deepdive.extraction.extractors.extraction_rule_4 {
-    sql: """ DROP TABLE IF EXISTS sentences CASCADE;
-    CREATE TABLE
-    sentences(document_id text,
-              sentence text,
-              words text[],
-              lemma text[],
-              pos_tags text[],
-              dependencies text[],
-              ner_tags text[],
-              sentence_offset int,
-              sentence_id text,
-              dd_count int)
-    """
-    style: "sql_extractor"
-  }
-
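The cleanup command is now resolved per mode rather than from createTable alone. A sketch of that decision, under the assumption that Mode and the head-name checks look roughly as in the diff above (names illustrative): MERGE always truncates, declared tables are truncated only for dd_new_ heads, and undeclared heads are views and get DROP VIEW.

    // Sketch of the per-mode cleanup decision (illustrative; the real logic
    // lives in DeepDiveLogCompiler's extraction-rule compilation).
    sealed trait Mode
    case object MERGE extends Mode
    case object INCREMENTAL extends Mode

    def cleanupSql(mode: Mode, head: String, hasSchemaDecl: Boolean): String = mode match {
      case MERGE => s"TRUNCATE $head;"
      case _ =>
        if (hasSchemaDecl) { if (head.startsWith("dd_new_")) s"TRUNCATE $head;" else "" }
        else s"DROP VIEW IF EXISTS $head;"
    }
    // cleanupSql(INCREMENTAL, "dd_delta_has_spouse", hasSchemaDecl = true) => ""
    // which is why the extraction_rule_32 block later in this diff no longer
    // begins with a TRUNCATE.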
deepdive.extraction.extractors.extraction_rule_1 { sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; CREATE TABLE @@ -42,20 +25,6 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_12 { - sql: """ DROP TABLE IF EXISTS has_spouse_candidates CASCADE; - CREATE TABLE - has_spouse_candidates(person1_id text, - person2_id text, - sentence_id text, - description text, - relation_id text, - is_true boolean, - dd_count int) - """ - style: "sql_extractor" - } - deepdive.extraction.extractors.extraction_rule_10 { sql: """ DROP TABLE IF EXISTS dd_new_people_mentions CASCADE; CREATE TABLE @@ -69,17 +38,6 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_20 { - sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; - CREATE TABLE - has_spouse(relation_id text, - id bigint, - label boolean, - dd_count int) - """ - style: "sql_extractor" - } - deepdive.extraction.extractors.extraction_rule_17 { sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; CREATE TABLE @@ -100,16 +58,6 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_0 { - sql: """ DROP TABLE IF EXISTS articles CASCADE; - CREATE TABLE - articles(article_id text, - text text, - dd_count int) - """ - style: "sql_extractor" - } - deepdive.extraction.extractors.extraction_rule_14 { sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_candidates CASCADE; CREATE TABLE @@ -178,16 +126,6 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_16 { - sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE; - CREATE TABLE - has_spouse_features(relation_id text, - feature text, - dd_count int) - """ - style: "sql_extractor" - } - deepdive.extraction.extractors.extraction_rule_5 { sql: """ DROP TABLE IF EXISTS dd_delta_sentences CASCADE; CREATE TABLE @@ -205,19 +143,6 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_8 { - sql: """ DROP TABLE IF EXISTS people_mentions CASCADE; - CREATE TABLE - people_mentions(sentence_id text, - start_position int, - length int, - text text, - mention_id text, - dd_count int) - """ - style: "sql_extractor" - } - deepdive.extraction.extractors.extraction_rule_21 { sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE; CREATE TABLE @@ -382,7 +307,7 @@ deepdive.extraction.extractors.extraction_rule_32 { - sql: """ TRUNCATE dd_delta_has_spouse; + sql: """ INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count FROM dd_delta_has_spouse_candidates R0 @@ -436,7 +361,7 @@ } deepdive.pipeline.run: ${PIPELINE} -deepdive.pipeline.pipelines.initdb: [extraction_rule_4, extraction_rule_1, extraction_rule_12, extraction_rule_10, extraction_rule_20, extraction_rule_17, extraction_rule_2, extraction_rule_0, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_16, extraction_rule_5, extraction_rule_8, extraction_rule_21, extraction_rule_22] +deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_10, extraction_rule_17, extraction_rule_2, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_5, extraction_rule_21, extraction_rule_22] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_32, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, 
extraction_rule_15] deepdive.pipeline.pipelines.inference: [factor_dd_delta_has_spouse_33] deepdive.pipeline.pipelines.cleanup: [cleanup] diff --git a/test/expected-output-test/spouse_example/compile-materialization.expected b/test/expected-output-test/spouse_example/compile-materialization.expected index dffdaf9b1..6aae96223 100644 --- a/test/expected-output-test/spouse_example/compile-materialization.expected +++ b/test/expected-output-test/spouse_example/compile-materialization.expected @@ -91,7 +91,7 @@ } deepdive.extraction.extractors.extraction_rule_15 { - sql: """ TRUNCATE has_spouse; + sql: """ INSERT INTO has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count FROM has_spouse_candidates R0 diff --git a/test/expected-output-test/spouse_example/compile.expected b/test/expected-output-test/spouse_example/compile.expected index 92d41bd89..ec2e5abbf 100644 --- a/test/expected-output-test/spouse_example/compile.expected +++ b/test/expected-output-test/spouse_example/compile.expected @@ -85,7 +85,7 @@ } deepdive.extraction.extractors.extraction_rule_15 { - sql: """ TRUNCATE has_spouse; + sql: """ INSERT INTO has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label FROM has_spouse_candidates R0 diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index 7c6d1e0c0..2387e8a23 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -15,23 +15,6 @@ } - deepdive.extraction.extractors.extraction_rule_4 { - sql: """ DROP TABLE IF EXISTS sentences CASCADE; - CREATE TABLE - sentences(document_id text, - sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], - sentence_offset int, - sentence_id text, - dd_count int) - """ - style: "sql_extractor" - } - deepdive.extraction.extractors.extraction_rule_1 { sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; CREATE TABLE @@ -42,20 +25,6 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_12 { - sql: """ DROP TABLE IF EXISTS has_spouse_candidates CASCADE; - CREATE TABLE - has_spouse_candidates(person1_id text, - person2_id text, - sentence_id text, - description text, - relation_id text, - is_true boolean, - dd_count int) - """ - style: "sql_extractor" - } - deepdive.extraction.extractors.extraction_rule_10 { sql: """ DROP TABLE IF EXISTS dd_new_people_mentions CASCADE; CREATE TABLE @@ -69,17 +38,6 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_20 { - sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; - CREATE TABLE - has_spouse(relation_id text, - id bigint, - label boolean, - dd_count int) - """ - style: "sql_extractor" - } - deepdive.extraction.extractors.extraction_rule_17 { sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; CREATE TABLE @@ -100,16 +58,6 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_0 { - sql: """ DROP TABLE IF EXISTS articles CASCADE; - CREATE TABLE - articles(article_id text, - text text, - dd_count int) - """ - style: "sql_extractor" - } - deepdive.extraction.extractors.extraction_rule_14 { sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_candidates CASCADE; CREATE TABLE @@ -178,16 +126,6 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_16 { 
-    sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE;
-    CREATE TABLE
-    has_spouse_features(relation_id text,
-                        feature text,
-                        dd_count int)
-    """
-    style: "sql_extractor"
-  }
-
   deepdive.extraction.extractors.extraction_rule_5 {
     sql: """ DROP TABLE IF EXISTS dd_delta_sentences CASCADE;
     CREATE TABLE
@@ -205,19 +143,6 @@
     style: "sql_extractor"
   }

-  deepdive.extraction.extractors.extraction_rule_8 {
-    sql: """ DROP TABLE IF EXISTS people_mentions CASCADE;
-    CREATE TABLE
-    people_mentions(sentence_id text,
-                    start_position int,
-                    length int,
-                    text text,
-                    mention_id text,
-                    dd_count int)
-    """
-    style: "sql_extractor"
-  }
-
   deepdive.extraction.extractors.extraction_rule_21 {
     sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE;
     CREATE TABLE
@@ -385,7 +310,7 @@

   deepdive.extraction.extractors.extraction_rule_32 {
-    sql: """ TRUNCATE dd_delta_has_spouse;
+    sql: """
     INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count
     FROM dd_delta_has_spouse_candidates R0
@@ -439,7 +364,7 @@
 }
 deepdive.pipeline.run: ${PIPELINE}
-deepdive.pipeline.pipelines.initdb: [extraction_rule_4, extraction_rule_1, extraction_rule_12, extraction_rule_10, extraction_rule_20, extraction_rule_17, extraction_rule_2, extraction_rule_0, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_16, extraction_rule_5, extraction_rule_8, extraction_rule_21, extraction_rule_22]
+deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_10, extraction_rule_17, extraction_rule_2, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_5, extraction_rule_21, extraction_rule_22]
 deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_32, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15]
 deepdive.pipeline.pipelines.inference: [factor_dd_delta_has_spouse_33]
 deepdive.pipeline.pipelines.cleanup: [cleanup]

From 2a1f2ded925276bde540b2fe372a87fe497c76f9 Mon Sep 17 00:00:00 2001
From: senwu
Date: Thu, 28 May 2015 18:26:58 -0700
Subject: [PATCH 124/347] update inference rule naming from global index to
 local index, to match inference names between MATERIALIZATION and
 INCREMENTAL modes
---
 src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala  | 2 +-
 .../spouse_example/compile-incremental.expected              | 4 ++--
 .../spouse_example/compile-materialization.expected          | 4 ++--
 test/expected-output-test/spouse_example/compile.expected    | 4 ++--
 .../spouse_example_new_feature/compile-incremental.expected  | 4 ++--
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index 20f812063..ce8a4fb46 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -147,7 +147,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C

   // Given an inference rule, resolve its name for the compiled inference block.
   def resolveInferenceBlockName(s: InferenceRule): String = {
-    s"factor_${s.q.head.name}_${statements indexOf s}"
+    s"${s.q.head.name}_${(inferenceRuleGroupByHead map (_._2)).flatten.toList indexOf s}"
   }

   // Given a variable, resolve it.
TODO: This should give a warning, diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected index a3d44b221..a8f90345f 100644 --- a/test/expected-output-test/spouse_example/compile-incremental.expected +++ b/test/expected-output-test/spouse_example/compile-incremental.expected @@ -348,7 +348,7 @@ } - deepdive.inference.factors.factor_dd_delta_has_spouse_33 { + deepdive.inference.factors.dd_delta_has_spouse_0 { input_query: """ SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 @@ -363,5 +363,5 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_10, extraction_rule_17, extraction_rule_2, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_5, extraction_rule_21, extraction_rule_22] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_32, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15] -deepdive.pipeline.pipelines.inference: [factor_dd_delta_has_spouse_33] +deepdive.pipeline.pipelines.inference: [dd_delta_has_spouse_0] deepdive.pipeline.pipelines.cleanup: [cleanup] diff --git a/test/expected-output-test/spouse_example/compile-materialization.expected b/test/expected-output-test/spouse_example/compile-materialization.expected index 6aae96223..dd9d06ed1 100644 --- a/test/expected-output-test/spouse_example/compile-materialization.expected +++ b/test/expected-output-test/spouse_example/compile-materialization.expected @@ -168,7 +168,7 @@ } - deepdive.inference.factors.factor_has_spouse_16 { + deepdive.inference.factors.has_spouse_0 { input_query: """ SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 @@ -180,4 +180,4 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_3, extraction_rule_5, extraction_rule_0, extraction_rule_4, extraction_rule_2] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_15, extraction_rule_6] -deepdive.pipeline.pipelines.inference: [factor_has_spouse_16] +deepdive.pipeline.pipelines.inference: [has_spouse_0] diff --git a/test/expected-output-test/spouse_example/compile.expected b/test/expected-output-test/spouse_example/compile.expected index ec2e5abbf..09686a2d0 100644 --- a/test/expected-output-test/spouse_example/compile.expected +++ b/test/expected-output-test/spouse_example/compile.expected @@ -162,7 +162,7 @@ } - deepdive.inference.factors.factor_has_spouse_16 { + deepdive.inference.factors.has_spouse_0 { input_query: """ SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 @@ -174,4 +174,4 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_3, extraction_rule_5, extraction_rule_0, extraction_rule_4, extraction_rule_2] 
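The renaming drops the statement's global position in favor of the rule's position among inference rules only, so the factor dd_delta_has_spouse_0 produced in INCREMENTAL mode lines up with has_spouse_0 from MATERIALIZATION mode. A sketch of the local-index idea (simplified Rule type; the real code flattens inferenceRuleGroupByHead the same way):

    // Sketch: index the rule within the flattened inference-rule groups
    // rather than within the whole statement list.
    case class Rule(head: String)
    def blockName(rulesByHead: Map[String, List[Rule]], s: Rule): String =
      s"${s.head}_${(rulesByHead map (_._2)).flatten.toList indexOf s}"
    // blockName(Map("has_spouse" -> List(Rule("has_spouse"))), Rule("has_spouse"))
    // => has_spouse_0, where the old scheme produced factor_has_spouse_16.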
deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_15, extraction_rule_6] -deepdive.pipeline.pipelines.inference: [factor_has_spouse_16] +deepdive.pipeline.pipelines.inference: [has_spouse_0] diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index 2387e8a23..5ca1442c2 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -351,7 +351,7 @@ } - deepdive.inference.factors.factor_dd_delta_has_spouse_33 { + deepdive.inference.factors.dd_delta_has_spouse_0 { input_query: """ SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 @@ -366,5 +366,5 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_10, extraction_rule_17, extraction_rule_2, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_5, extraction_rule_21, extraction_rule_22] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_32, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15] -deepdive.pipeline.pipelines.inference: [factor_dd_delta_has_spouse_33] +deepdive.pipeline.pipelines.inference: [dd_delta_has_spouse_0] deepdive.pipeline.pipelines.cleanup: [cleanup] From 28b73e7cdb1514abe35d4799d151b051ddc98ad6 Mon Sep 17 00:00:00 2001 From: senwu Date: Thu, 28 May 2015 18:39:12 -0700 Subject: [PATCH 125/347] add incremental_mode in dbsettings --- .../org/deepdive/ddlog/DeepDiveLogCompiler.scala | 15 +++++++++++---- .../spouse_example/compile-incremental.expected | 5 +++-- .../compile-materialization.expected | 5 +++-- .../spouse_example/compile-merge.expected | 5 +++-- .../spouse_example/compile.expected | 5 +++-- .../compile-incremental.expected | 5 +++-- 6 files changed, 26 insertions(+), 14 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index ce8a4fb46..1bc5c5f67 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -519,8 +519,14 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } - def compileUserSettings(): CompiledBlocks = { + def compileUserSettings(ss: CompilationState): CompiledBlocks = { // TODO read user's proto-application.conf and augment it + val mode = ss.mode match { + case ORIGINAL => "ORIGINAL" + case INCREMENTAL => "INCREMENTAL" + case MATERIALIZATION => "MATERIALIZATION" + case MERGE => "MERGE" + } List(""" deepdive.db.default { driver: "org.postgresql.Driver" @@ -530,8 +536,9 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { dbname: ${DBNAME} host: ${PGHOST} port: ${PGPORT} - } - """) + incremental_mode: """ + s"""${mode} + } + """) } def compilePipelines(ss: CompilationState): CompiledBlocks = { @@ -589,7 +596,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { // compile the program into 
blocks of application.conf val blocks = ( - compileUserSettings + compileUserSettings(state) ::: compileVariableSchema(programToCompile, state) ::: diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected index a8f90345f..eed01bc17 100644 --- a/test/expected-output-test/spouse_example/compile-incremental.expected +++ b/test/expected-output-test/spouse_example/compile-incremental.expected @@ -7,8 +7,9 @@ dbname: ${DBNAME} host: ${PGHOST} port: ${PGPORT} - } - + incremental_mode: INCREMENTAL + } + deepdive.schema.variables { dd_delta_has_spouse.label: Boolean diff --git a/test/expected-output-test/spouse_example/compile-materialization.expected b/test/expected-output-test/spouse_example/compile-materialization.expected index dd9d06ed1..5197ed894 100644 --- a/test/expected-output-test/spouse_example/compile-materialization.expected +++ b/test/expected-output-test/spouse_example/compile-materialization.expected @@ -7,8 +7,9 @@ dbname: ${DBNAME} host: ${PGHOST} port: ${PGPORT} - } - + incremental_mode: MATERIALIZATION + } + deepdive.schema.variables { has_spouse.label: Boolean diff --git a/test/expected-output-test/spouse_example/compile-merge.expected b/test/expected-output-test/spouse_example/compile-merge.expected index 7a071a982..f65265b3a 100644 --- a/test/expected-output-test/spouse_example/compile-merge.expected +++ b/test/expected-output-test/spouse_example/compile-merge.expected @@ -7,8 +7,9 @@ dbname: ${DBNAME} host: ${PGHOST} port: ${PGPORT} - } - + incremental_mode: MERGE + } + deepdive.schema.variables { diff --git a/test/expected-output-test/spouse_example/compile.expected b/test/expected-output-test/spouse_example/compile.expected index 09686a2d0..f83b1ac08 100644 --- a/test/expected-output-test/spouse_example/compile.expected +++ b/test/expected-output-test/spouse_example/compile.expected @@ -7,8 +7,9 @@ dbname: ${DBNAME} host: ${PGHOST} port: ${PGPORT} - } - + incremental_mode: ORIGINAL + } + deepdive.schema.variables { has_spouse.label: Boolean diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index 5ca1442c2..3ad61910a 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -7,8 +7,9 @@ dbname: ${DBNAME} host: ${PGHOST} port: ${PGPORT} - } - + incremental_mode: INCREMENTAL + } + deepdive.schema.variables { dd_delta_has_spouse.label: Boolean From 369f5f9a3cfa0a87f8ae997945fa1f3c143ff16d Mon Sep 17 00:00:00 2001 From: senwu Date: Thu, 28 May 2015 19:22:05 -0700 Subject: [PATCH 126/347] add variable keys for INCREMENTAL mode to recognize unique keys --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 19 +++++++++++++++++++ .../compile-incremental.expected | 5 +++++ .../compile-materialization.expected | 1 + .../spouse_example/compile-merge.expected | 1 + .../spouse_example/compile.expected | 1 + .../compile-incremental.expected | 5 +++++ 6 files changed, 32 insertions(+) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 1bc5c5f67..d5943d6d2 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -541,6 +541,23 @@ object DeepDiveLogCompiler extends 
DeepDiveLogHandler { """) } + def compileVariableKey(ss: CompilationState): CompiledBlocks = { + var keys = new ListBuffer[String]() + for (stmt <- (ss.schemaDeclarationGroupByHead map (_._2)).flatten) { + var columnNames = stmt.a.terms map { + case Variable(name, _, i) => name + } + if (stmt.isQuery) keys += s"""${stmt.a.name} : [${columnNames.mkString(", ")}]""" + } + ss.mode match { + case INCREMENTAL => List(s""" + deepdive.schema.keys { + ${keys.mkString("\n ")} + }""") + case _ => List("") + } + } + def compilePipelines(ss: CompilationState): CompiledBlocks = { val run = "deepdive.pipeline.run: ${PIPELINE}" val setup_database_pipeline = ((ss.schemaDeclarationGroupByHead map (_._2)).flatten map {s => ss.resolveExtractorBlockName(s)}).mkString(", ") @@ -598,6 +615,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val blocks = ( compileUserSettings(state) ::: + compileVariableKey(state) + ::: compileVariableSchema(programToCompile, state) ::: body.toList diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected index eed01bc17..feaf6c418 100644 --- a/test/expected-output-test/spouse_example/compile-incremental.expected +++ b/test/expected-output-test/spouse_example/compile-incremental.expected @@ -11,6 +11,11 @@ } + deepdive.schema.keys { + dd_delta_has_spouse : [relation_id] + dd_new_has_spouse : [relation_id] + } + deepdive.schema.variables { dd_delta_has_spouse.label: Boolean } diff --git a/test/expected-output-test/spouse_example/compile-materialization.expected b/test/expected-output-test/spouse_example/compile-materialization.expected index 5197ed894..126d3cf68 100644 --- a/test/expected-output-test/spouse_example/compile-materialization.expected +++ b/test/expected-output-test/spouse_example/compile-materialization.expected @@ -11,6 +11,7 @@ } + deepdive.schema.variables { has_spouse.label: Boolean } diff --git a/test/expected-output-test/spouse_example/compile-merge.expected b/test/expected-output-test/spouse_example/compile-merge.expected index f65265b3a..b0dfc4a2f 100644 --- a/test/expected-output-test/spouse_example/compile-merge.expected +++ b/test/expected-output-test/spouse_example/compile-merge.expected @@ -11,6 +11,7 @@ } + deepdive.schema.variables { } diff --git a/test/expected-output-test/spouse_example/compile.expected b/test/expected-output-test/spouse_example/compile.expected index f83b1ac08..674f5d9f6 100644 --- a/test/expected-output-test/spouse_example/compile.expected +++ b/test/expected-output-test/spouse_example/compile.expected @@ -11,6 +11,7 @@ } + deepdive.schema.variables { has_spouse.label: Boolean } diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index 3ad61910a..69b1e1425 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -11,6 +11,11 @@ } + deepdive.schema.keys { + dd_delta_has_spouse : [relation_id] + dd_new_has_spouse : [relation_id] + } + deepdive.schema.variables { dd_delta_has_spouse.label: Boolean } From fb1d0fe1a91ce452f3c113ea68229e67eb3027b5 Mon Sep 17 00:00:00 2001 From: senwu Date: Fri, 29 May 2015 00:34:38 -0700 Subject: [PATCH 127/347] support inference rule with more than one variable --- examples/smoke.ddl | 35 +++ examples/spouse_example.ddl | 2 +- 
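PATCH 126's deepdive.schema.keys block lists, for each variable relation, the user columns that identify a row, and is emitted only in INCREMENTAL mode. A sketch of that emission (illustrative helper, not the compiler's exact compileVariableKey):

    // Sketch: one "relation : [cols]" entry per variable relation,
    // emitted only when compiling incrementally.
    def variableKeys(varRelations: List[(String, List[String])], incremental: Boolean): String =
      if (!incremental) ""
      else {
        val keys = varRelations map { case (rel, cols) => s"$rel : [${cols.mkString(", ")}]" }
        s"deepdive.schema.keys {\n  ${keys.mkString("\n  ")}\n}"
      }
    // variableKeys(List("dd_delta_has_spouse" -> List("relation_id"),
    //                   "dd_new_has_spouse"   -> List("relation_id")), incremental = true)
    // reproduces the deepdive.schema.keys block in the expected output above.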
.../deepdive/ddlog/DeepDiveLogCompiler.scala | 24 +- .../compile-incremental.expected | 268 ++++++++++++++++++ .../compile-materialization.expected | 118 ++++++++ .../smoke_example/compile-merge.expected | 72 +++++ .../smoke_example/compile.expected | 112 ++++++++ .../smoke_example/input.ddl | 35 +++ .../smoke_example/print-incremental.expected | 81 ++++++ .../smoke_example/print.expected | 31 ++ .../spouse_example/print-incremental.expected | 2 +- .../spouse_example/print.expected | 2 +- .../spouse_example_new_feature/input.ddl | 2 +- .../print-incremental.expected | 2 +- 14 files changed, 778 insertions(+), 8 deletions(-) create mode 100644 examples/smoke.ddl create mode 100644 test/expected-output-test/smoke_example/compile-incremental.expected create mode 100644 test/expected-output-test/smoke_example/compile-materialization.expected create mode 100644 test/expected-output-test/smoke_example/compile-merge.expected create mode 100644 test/expected-output-test/smoke_example/compile.expected create mode 100644 test/expected-output-test/smoke_example/input.ddl create mode 100644 test/expected-output-test/smoke_example/print-incremental.expected create mode 100644 test/expected-output-test/smoke_example/print.expected diff --git a/examples/smoke.ddl b/examples/smoke.ddl new file mode 100644 index 000000000..7371f2885 --- /dev/null +++ b/examples/smoke.ddl @@ -0,0 +1,35 @@ +person ( + person_id bigint, + name text +). + +person_has_cancer ( + person_id bigint, + has_cancer boolean +). + +person_smokes ( + person_id bigint, + smokes boolean +). + +friends ( + person_id bigint, + friend_id bigint +). + +smoke? ( + person_id bigint +). + +cancer? ( + person_id bigint +). + +smoke(pid) :- person_smokes(pid, l) label = l. +cancer(pid) :- person_has_cancer(pid, l) label = l. + +cancer(pid) :- smoke(pid), person_smokes(pid, l) + weight = 3 + label = l. + diff --git a/examples/spouse_example.ddl b/examples/spouse_example.ddl index 6d186b067..ca340bcab 100644 --- a/examples/spouse_example.ddl +++ b/examples/spouse_example.ddl @@ -75,4 +75,4 @@ has_spouse(rid) :- has_spouse_features(rid, f) weight = f label = l -semantics = imply. +semantics = Imply. diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index d5943d6d2..9fb793c7d 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -94,6 +94,8 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // The statement whether will compile or union to other statements var visible : Set[Statement] = Set() + var variableTableNames : Set[String] = Set() + var mode : Mode = ORIGINAL var useDeltaCount : Boolean = false @@ -113,12 +115,14 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C case _ => true } statements.foreach { - case SchemaDeclaration(Attribute(r, terms, types), isQuery) => + case SchemaDeclaration(Attribute(r, terms, types), isQuery) => { terms.foreach { case Variable(n,r,i) => schema += { (r,i) -> n } ground_relations += { r -> !isQuery } // record whether a query or a ground term. 
} + if (isQuery) variableTableNames += r + } case ExtractionRule(_,_) => () case InferenceRule(_,_,_,_) => () case fdecl : FunctionDeclaration => function_schema += {fdecl.functionName -> fdecl} @@ -478,7 +482,13 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val index = cqBody.length + 1 val qs2 = new QuerySchema( fakeCQ ) - val variableIdsStr = Some(s"""R0.id AS "${stmt.q.head.name}.R0.id" """) + val varInBody = (fakeBody map {x => + if (ss.variableTableNames contains x.name) + s"""R${fakeBody indexOf x}.id AS "${x.name}.R${fakeBody indexOf x}.id" """ + else "" + }).filter(_ != "") + + val variableIdsStr = Some(varInBody.mkString(", ")) // weight string val uwStr = stmt.weights match { @@ -496,7 +506,15 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { SELECT ${selectStr} ${ddCountStr} ${ ss.generateSQLBody(fakeCQ) }""" // factor function - func = s"""Imply(${stmt.q.head.name}.R0.label)""" + if (func.length == 0) { + val funcBody = (fakeBody map {x => + if (ss.variableTableNames contains x.name) + s"""${x.name}.R${fakeBody indexOf x}.label""" + else "" + }).filter(_ != "") + val firstFunc = funcBody(0) + func = s"""${stmt.semantics}(${(funcBody.tail :+ firstFunc).mkString(", ")})""" + } // weight if (weight.length == 0) weight = stmt.weights match { diff --git a/test/expected-output-test/smoke_example/compile-incremental.expected b/test/expected-output-test/smoke_example/compile-incremental.expected new file mode 100644 index 000000000..a1eebd935 --- /dev/null +++ b/test/expected-output-test/smoke_example/compile-incremental.expected @@ -0,0 +1,268 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: INCREMENTAL + } + + + deepdive.schema.keys { + dd_delta_smoke : [person_id] + dd_new_smoke : [person_id] + dd_delta_cancer : [person_id] + dd_new_cancer : [person_id] + } + + deepdive.schema.variables { + dd_delta_cancer.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP TABLE IF EXISTS dd_delta_friends CASCADE; + CREATE TABLE + dd_delta_friends(person_id bigint, + friend_id bigint, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_17 { + sql: """ DROP TABLE IF EXISTS dd_delta_smoke CASCADE; + CREATE TABLE + dd_delta_smoke(person_id bigint, + id bigint, + label boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ DROP TABLE IF EXISTS dd_new_person_has_cancer CASCADE; + CREATE TABLE + dd_new_person_has_cancer(person_id bigint, + has_cancer boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS dd_delta_person CASCADE; + CREATE TABLE + dd_delta_person(person_id bigint, + name text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS dd_new_person CASCADE; + CREATE TABLE + dd_new_person(person_id bigint, + name text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS dd_delta_person_has_cancer CASCADE; + CREATE TABLE + dd_delta_person_has_cancer(person_id bigint, + has_cancer boolean, + dd_count int) + """ + style: "sql_extractor" + } + + 
deepdive.extraction.extractors.extraction_rule_18 { + sql: """ DROP TABLE IF EXISTS dd_new_smoke CASCADE; + CREATE TABLE + dd_new_smoke(person_id bigint, + id bigint, + label boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_20 { + sql: """ DROP TABLE IF EXISTS dd_delta_cancer CASCADE; + CREATE TABLE + dd_delta_cancer(person_id bigint, + id bigint, + label boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_21 { + sql: """ DROP TABLE IF EXISTS dd_new_cancer CASCADE; + CREATE TABLE + dd_new_cancer(person_id bigint, + id bigint, + label boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_9 { + sql: """ DROP TABLE IF EXISTS dd_delta_person_smokes CASCADE; + CREATE TABLE + dd_delta_person_smokes(person_id bigint, + smokes boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_10 { + sql: """ DROP TABLE IF EXISTS dd_new_person_smokes CASCADE; + CREATE TABLE + dd_new_person_smokes(person_id bigint, + smokes boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_14 { + sql: """ DROP TABLE IF EXISTS dd_new_friends CASCADE; + CREATE TABLE + dd_new_friends(person_id bigint, + friend_id bigint, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE dd_delta_friends; + TRUNCATE dd_delta_smoke; + TRUNCATE dd_new_person_has_cancer; + TRUNCATE dd_delta_person; + TRUNCATE dd_new_person; + TRUNCATE dd_delta_person_has_cancer; + TRUNCATE dd_new_smoke; + TRUNCATE dd_delta_cancer; + TRUNCATE dd_new_cancer; + TRUNCATE dd_delta_person_smokes; + TRUNCATE dd_new_person_smokes; + TRUNCATE dd_new_friends; + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_22 { + sql: """ + INSERT INTO dd_delta_smoke + SELECT R0.person_id AS "dd_delta_person_smokes.R0.person_id" , R0.dd_count AS "dd_count" + FROM dd_delta_person_smokes R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_7 { + sql: """ TRUNCATE dd_new_person_has_cancer; + INSERT INTO dd_new_person_has_cancer + SELECT R0.person_id, R0.has_cancer, R0.dd_count + FROM person_has_cancer R0 + UNION + SELECT R0.person_id, R0.has_cancer, R0.dd_count + FROM dd_delta_person_has_cancer R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ TRUNCATE dd_new_person; + INSERT INTO dd_new_person + SELECT R0.person_id, R0.name, R0.dd_count + FROM person R0 + UNION + SELECT R0.person_id, R0.name, R0.dd_count + FROM dd_delta_person R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_23 { + sql: """ + INSERT INTO dd_delta_cancer SELECT DISTINCT R0.person_id, 0 as id, R0.has_cancer AS label , R0.dd_count AS dd_count + FROM dd_delta_person_has_cancer R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_11 { + sql: """ TRUNCATE dd_new_person_smokes; + INSERT INTO dd_new_person_smokes + SELECT R0.person_id, R0.smokes, R0.dd_count + FROM person_smokes R0 + UNION + SELECT R0.person_id, R0.smokes, R0.dd_count + FROM dd_delta_person_smokes R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_15 { + sql: """ TRUNCATE dd_new_friends; + INSERT INTO dd_new_friends + SELECT R0.person_id, 
R0.friend_id, R0.dd_count + FROM friends R0 + UNION + SELECT R0.person_id, R0.friend_id, R0.dd_count + FROM dd_delta_friends R0 + + """ + style: "sql_extractor" + + } + + + deepdive.inference.factors.dd_delta_cancer_0 { + input_query: """ + SELECT R0.id AS "dd_delta_cancer.R0.id" , R1.id AS "dd_delta_smoke.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_cancer R0, dd_delta_smoke R1, person_smokes R2 + WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id UNION + SELECT R0.id AS "dd_delta_cancer.R0.id" , R1.id AS "dd_new_smoke.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_cancer R0, dd_new_smoke R1, dd_delta_person_smokes R2 + WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ + function: "Imply(dd_delta_smoke.R1.label, dd_delta_cancer.R0.label)" + weight: "3.0" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_13, extraction_rule_17, extraction_rule_6, extraction_rule_1, extraction_rule_2, extraction_rule_5, extraction_rule_18, extraction_rule_20, extraction_rule_21, extraction_rule_9, extraction_rule_10, extraction_rule_14] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_22, extraction_rule_3, extraction_rule_23, extraction_rule_11, extraction_rule_15] +deepdive.pipeline.pipelines.inference: [dd_delta_cancer_0] +deepdive.pipeline.pipelines.cleanup: [cleanup] diff --git a/test/expected-output-test/smoke_example/compile-materialization.expected b/test/expected-output-test/smoke_example/compile-materialization.expected new file mode 100644 index 000000000..82710e1cd --- /dev/null +++ b/test/expected-output-test/smoke_example/compile-materialization.expected @@ -0,0 +1,118 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: MATERIALIZATION + } + + + + deepdive.schema.variables { + cancer.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS person_smokes CASCADE; + CREATE TABLE + person_smokes(person_id bigint, + smokes boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS smoke CASCADE; + CREATE TABLE + smoke(person_id bigint, + id bigint, + label boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS cancer CASCADE; + CREATE TABLE + cancer(person_id bigint, + id bigint, + label boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS friends CASCADE; + CREATE TABLE + friends(person_id bigint, + friend_id bigint, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS person_has_cancer CASCADE; + CREATE TABLE + person_has_cancer(person_id bigint, + has_cancer boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS person CASCADE; + CREATE TABLE + person(person_id bigint, + name text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_7 { + sql: """ + INSERT INTO cancer SELECT DISTINCT 
R0.person_id, 0 as id, R0.has_cancer AS label , R0.dd_count AS dd_count + FROM person_has_cancer R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ + INSERT INTO smoke + SELECT R0.person_id AS "person_smokes.R0.person_id" , R0.dd_count AS "dd_count" + FROM person_smokes R0 + + """ + style: "sql_extractor" + + } + + + deepdive.inference.factors.cancer_0 { + input_query: """ + SELECT R0.id AS "cancer.R0.id" , R1.id AS "smoke.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM cancer R0, smoke R1, person_smokes R2 + WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ + function: "Imply(smoke.R1.label, cancer.R0.label)" + weight: "3.0" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_2, extraction_rule_4, extraction_rule_5, extraction_rule_3, extraction_rule_1, extraction_rule_0] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_6] +deepdive.pipeline.pipelines.inference: [cancer_0] diff --git a/test/expected-output-test/smoke_example/compile-merge.expected b/test/expected-output-test/smoke_example/compile-merge.expected new file mode 100644 index 000000000..10a177f1e --- /dev/null +++ b/test/expected-output-test/smoke_example/compile-merge.expected @@ -0,0 +1,72 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: MERGE + } + + + + deepdive.schema.variables { + + } + + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ TRUNCATE person_smokes; + INSERT INTO person_smokes + SELECT R0.person_id, R0.smokes, SUM(R0.dd_count) + FROM dd_new_person_smokes R0 + GROUP BY R0.person_id, R0.smokes; + DELETE FROM person_smokes WHERE dd_count = 0; + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ TRUNCATE friends; + INSERT INTO friends + SELECT R0.person_id, R0.friend_id, SUM(R0.dd_count) + FROM dd_new_friends R0 + GROUP BY R0.person_id, R0.friend_id; + DELETE FROM friends WHERE dd_count = 0; + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ TRUNCATE person_has_cancer; + INSERT INTO person_has_cancer + SELECT R0.person_id, R0.has_cancer, SUM(R0.dd_count) + FROM dd_new_person_has_cancer R0 + GROUP BY R0.person_id, R0.has_cancer; + DELETE FROM person_has_cancer WHERE dd_count = 0; + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ TRUNCATE person; + INSERT INTO person + SELECT R0.person_id, R0.name, SUM(R0.dd_count) + FROM dd_new_person R0 + GROUP BY R0.person_id, R0.name; + DELETE FROM person WHERE dd_count = 0; + """ + style: "sql_extractor" + + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.extraction: [extraction_rule_2, extraction_rule_3, extraction_rule_1, extraction_rule_0] diff --git a/test/expected-output-test/smoke_example/compile.expected b/test/expected-output-test/smoke_example/compile.expected new file mode 100644 index 000000000..6d4c87891 --- /dev/null +++ b/test/expected-output-test/smoke_example/compile.expected @@ -0,0 +1,112 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: 
${PGPORT} + incremental_mode: ORIGINAL + } + + + + deepdive.schema.variables { + cancer.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS person_smokes CASCADE; + CREATE TABLE + person_smokes(person_id bigint, + smokes boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS smoke CASCADE; + CREATE TABLE + smoke(person_id bigint, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS cancer CASCADE; + CREATE TABLE + cancer(person_id bigint, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS friends CASCADE; + CREATE TABLE + friends(person_id bigint, + friend_id bigint) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS person_has_cancer CASCADE; + CREATE TABLE + person_has_cancer(person_id bigint, + has_cancer boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS person CASCADE; + CREATE TABLE + person(person_id bigint, + name text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_7 { + sql: """ + INSERT INTO cancer SELECT DISTINCT R0.person_id, 0 as id, R0.has_cancer AS label + FROM person_has_cancer R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ + INSERT INTO smoke + SELECT R0.person_id AS "person_smokes.R0.person_id" + FROM person_smokes R0 + + """ + style: "sql_extractor" + + } + + + deepdive.inference.factors.cancer_0 { + input_query: """ + SELECT R0.id AS "cancer.R0.id" , R1.id AS "smoke.R1.id" + FROM cancer R0, smoke R1, person_smokes R2 + WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ + function: "Imply(smoke.R1.label, cancer.R0.label)" + weight: "3.0" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_2, extraction_rule_4, extraction_rule_5, extraction_rule_3, extraction_rule_1, extraction_rule_0] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_6] +deepdive.pipeline.pipelines.inference: [cancer_0] diff --git a/test/expected-output-test/smoke_example/input.ddl b/test/expected-output-test/smoke_example/input.ddl new file mode 100644 index 000000000..7371f2885 --- /dev/null +++ b/test/expected-output-test/smoke_example/input.ddl @@ -0,0 +1,35 @@ +person ( + person_id bigint, + name text +). + +person_has_cancer ( + person_id bigint, + has_cancer boolean +). + +person_smokes ( + person_id bigint, + smokes boolean +). + +friends ( + person_id bigint, + friend_id bigint +). + +smoke? ( + person_id bigint +). + +cancer? ( + person_id bigint +). + +smoke(pid) :- person_smokes(pid, l) label = l. +cancer(pid) :- person_has_cancer(pid, l) label = l. + +cancer(pid) :- smoke(pid), person_smokes(pid, l) + weight = 3 + label = l. + diff --git a/test/expected-output-test/smoke_example/print-incremental.expected b/test/expected-output-test/smoke_example/print-incremental.expected new file mode 100644 index 000000000..def3b989b --- /dev/null +++ b/test/expected-output-test/smoke_example/print-incremental.expected @@ -0,0 +1,81 @@ +person(person_id bigint, + name text). + +dd_delta_person(person_id bigint, + name text). 
+ +dd_new_person(person_id bigint, + name text). + +dd_new_person(person_id, name) :- + person(person_id, name); + dd_delta_person(person_id, name). + +person_has_cancer(person_id bigint, + has_cancer boolean). + +dd_delta_person_has_cancer(person_id bigint, + has_cancer boolean). + +dd_new_person_has_cancer(person_id bigint, + has_cancer boolean). + +dd_new_person_has_cancer(person_id, has_cancer) :- + person_has_cancer(person_id, has_cancer); + dd_delta_person_has_cancer(person_id, has_cancer). + +person_smokes(person_id bigint, + smokes boolean). + +dd_delta_person_smokes(person_id bigint, + smokes boolean). + +dd_new_person_smokes(person_id bigint, + smokes boolean). + +dd_new_person_smokes(person_id, smokes) :- + person_smokes(person_id, smokes); + dd_delta_person_smokes(person_id, smokes). + +friends(person_id bigint, + friend_id bigint). + +dd_delta_friends(person_id bigint, + friend_id bigint). + +dd_new_friends(person_id bigint, + friend_id bigint). + +dd_new_friends(person_id, friend_id) :- + friends(person_id, friend_id); + dd_delta_friends(person_id, friend_id). + +smoke?(person_id bigint). + +dd_delta_smoke?(person_id bigint). + +dd_new_smoke?(person_id bigint). + +cancer?(person_id bigint). + +dd_delta_cancer?(person_id bigint). + +dd_new_cancer?(person_id bigint). + +dd_delta_smoke(pid) :- + dd_delta_person_smokes(pid, l) + label = l. + +dd_delta_cancer(pid) :- + dd_delta_person_has_cancer(pid, l) + label = l. + +dd_delta_cancer(pid) :- + dd_delta_smoke(pid), + person_smokes(pid, l); + dd_new_smoke(pid), + dd_delta_person_smokes(pid, l) + weight = 3.0 + label = l + semantics = Imply. + diff --git a/test/expected-output-test/smoke_example/print.expected b/test/expected-output-test/smoke_example/print.expected new file mode 100644 index 000000000..d3e06c9c8 --- /dev/null +++ b/test/expected-output-test/smoke_example/print.expected @@ -0,0 +1,31 @@ +person(person_id bigint, + name text). + +person_has_cancer(person_id bigint, + has_cancer boolean). + +person_smokes(person_id bigint, + smokes boolean). + +friends(person_id bigint, + friend_id bigint). + +smoke?(person_id bigint). + +cancer?(person_id bigint). + +smoke(pid) :- + person_smokes(pid, l) + label = l. + +cancer(pid) :- + person_has_cancer(pid, l) + label = l. + +cancer(pid) :- + smoke(pid), + person_smokes(pid, l) + weight = 3.0 + label = l + semantics = Imply. + diff --git a/test/expected-output-test/spouse_example/print-incremental.expected b/test/expected-output-test/spouse_example/print-incremental.expected index de4ffdc89..0a053b167 100644 --- a/test/expected-output-test/spouse_example/print-incremental.expected +++ b/test/expected-output-test/spouse_example/print-incremental.expected @@ -173,5 +173,5 @@ dd_delta_has_spouse(rid) :- dd_delta_has_spouse_features(rid, f) weight = f label = l - semantics = imply. + semantics = Imply. diff --git a/test/expected-output-test/spouse_example/print.expected b/test/expected-output-test/spouse_example/print.expected index 4f6323266..b0260f557 100644 --- a/test/expected-output-test/spouse_example/print.expected +++ b/test/expected-output-test/spouse_example/print.expected @@ -75,5 +75,5 @@ has_spouse(rid) :- has_spouse_features(rid, f) weight = f label = l - semantics = imply. + semantics = Imply. 
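The print-incremental output above makes the delta derivation concrete: every relation R gains a dd_delta_R and a dd_new_R, dd_new_R is defined as the union of R with dd_delta_R, and each rule body b1, ..., bn is rewritten into one variant per body atom, with dd_new_* versions before the chosen atom, the dd_delta_* version at it, and the original relations after it. A minimal sketch of that rewrite, using a hypothetical SimpleAtom type rather than the project's own AST:

    case class SimpleAtom(name: String)

    // One body variant per atom i: dd_new_* before i, dd_delta_* at i,
    // the unchanged relation after i; the delta rule is their union.
    def deltaVariants(body: List[SimpleAtom]): List[List[SimpleAtom]] =
      body.indices.toList map { i =>
        body.zipWithIndex map {
          case (a, j) if j < i  => a.copy(name = "dd_new_" + a.name)
          case (a, j) if j == i => a.copy(name = "dd_delta_" + a.name)
          case (a, _)           => a
        }
      }

    // deltaVariants(List(SimpleAtom("smoke"), SimpleAtom("person_smokes")))
    // yields (dd_delta_smoke, person_smokes) and
    // (dd_new_smoke, dd_delta_person_smokes), matching the two-disjunct
    // dd_delta_cancer rule in the expected output above.
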
diff --git a/test/expected-output-test/spouse_example_new_feature/input.ddl b/test/expected-output-test/spouse_example_new_feature/input.ddl index 0b22fabfc..aff904191 100644 --- a/test/expected-output-test/spouse_example_new_feature/input.ddl +++ b/test/expected-output-test/spouse_example_new_feature/input.ddl @@ -76,4 +76,4 @@ has_spouse(rid) :- has_spouse_features(rid, f) weight = f label = l -semantics = imply. +semantics = Imply. diff --git a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected index 0e3b00bc6..1ae89f64e 100644 --- a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected @@ -174,5 +174,5 @@ dd_delta_has_spouse(rid) :- dd_delta_has_spouse_features(rid, f) weight = f label = l - semantics = imply. + semantics = Imply. From fef8086cceffd776236097dacd1d39b4bd768567 Mon Sep 17 00:00:00 2001 From: senwu Date: Fri, 29 May 2015 14:36:42 -0700 Subject: [PATCH 128/347] remove label indicator in inference rules --- examples/spouse_example.ddl | 4 +--- .../scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala | 4 ++-- .../org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala | 2 +- src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala | 8 ++++---- .../org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala | 3 --- test/expected-output-test/semantics/input.ddl | 1 - test/expected-output-test/smoke_example/input.ddl | 4 +--- .../smoke_example/print-incremental.expected | 1 - test/expected-output-test/smoke_example/print.expected | 1 - .../spouse_example/print-incremental.expected | 1 - test/expected-output-test/spouse_example/print.expected | 1 - .../compile-incremental.expected | 2 +- .../spouse_example_new_feature/input.ddl | 3 +-- .../spouse_example_new_feature/print-incremental.expected | 3 +-- 14 files changed, 12 insertions(+), 26 deletions(-) diff --git a/examples/spouse_example.ddl b/examples/spouse_example.ddl index ca340bcab..4df11a8af 100644 --- a/examples/spouse_example.ddl +++ b/examples/spouse_example.ddl @@ -73,6 +73,4 @@ has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l. has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) -weight = f -label = l -semantics = Imply. +weight = f. diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 9fb793c7d..8a11cb126 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -124,7 +124,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C if (isQuery) variableTableNames += r } case ExtractionRule(_,_) => () - case InferenceRule(_,_,_,_) => () + case InferenceRule(_,_,_) => () case fdecl : FunctionDeclaration => function_schema += {fdecl.functionName -> fdecl} case FunctionCallRule(_,_,_) => () } @@ -596,7 +596,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var schema = Set[String]() // generate the statements. 
statements.foreach { - case InferenceRule(q, weights, supervision, rule) => + case InferenceRule(q, weights, rule) => val qs = new QuerySchema(q) schema += s"${q.head.name}.label: Boolean" case _ => () diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index db3964570..bd2b15738 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -128,7 +128,7 @@ object DeepDiveLogDeltaDeriver{ // Incremental inference rule, // create delta rules based on original extraction rule def transform(stmt: InferenceRule): List[Statement] = { - List(InferenceRule(transform(stmt.q), stmt.weights, stmt.supervision, stmt.semantics)) + List(InferenceRule(transform(stmt.q), stmt.weights, stmt.semantics)) } def generateIncrementalFunctionInputList(program: DeepDiveLog.Program) { diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index dfd1ef9be..d238dfa54 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -39,7 +39,7 @@ case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statem case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration], mode: String = null) extends Statement case class ExtractionRule(q : ConjunctiveQuery, supervision: String = null) extends Statement // Extraction rule case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule -case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, supervision : String, semantics : String = "Imply") extends Statement // Weighted rule +case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, semantics : String = "Imply") extends Statement // Weighted rule // Parser @@ -156,10 +156,10 @@ class DeepDiveLogParser extends JavaTokenParsers { def semantics = "semantics" ~> "=" ~> semanticType def inferenceRule : Parser[InferenceRule] = - ( conjunctiveQuery ~ factorWeight ~ supervision ~ opt(semantics) + ( conjunctiveQuery ~ factorWeight ~ opt(semantics) ) ^^ { - case (q ~ weight ~ supervision ~ semantics) => - InferenceRule(q, weight, supervision, semantics.getOrElse("Imply")) + case (q ~ weight ~ semantics) => + InferenceRule(q, weight, semantics.getOrElse("Imply")) } // rules or schema elements in arbitrary order diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index 56f7249bb..2d3c300eb 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -80,9 +80,6 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { case UnknownFactorWeight(vs) => vs.mkString(", ") }) ) + - ( if (stmt.supervision == null) "" - else "\n label = " + stmt.supervision - ) + ( if (stmt.semantics == null) "" else "\n semantics = " + stmt.semantics ) + ".\n" diff --git a/test/expected-output-test/semantics/input.ddl b/test/expected-output-test/semantics/input.ddl index 3de8d36d1..f18e77c06 100644 --- a/test/expected-output-test/semantics/input.ddl +++ b/test/expected-output-test/semantics/input.ddl @@ -5,5 +5,4 @@ Q?(x int). 
Q(x) :- R(x, y, z); R(x, y, z), S(y, z, w); S(y, x, w), T(x, z, w) weight = y -label = z semantics = imply. diff --git a/test/expected-output-test/smoke_example/input.ddl b/test/expected-output-test/smoke_example/input.ddl index 7371f2885..10da92bed 100644 --- a/test/expected-output-test/smoke_example/input.ddl +++ b/test/expected-output-test/smoke_example/input.ddl @@ -30,6 +30,4 @@ smoke(pid) :- person_smokes(pid, l) label = l. cancer(pid) :- person_has_cancer(pid, l) label = l. cancer(pid) :- smoke(pid), person_smokes(pid, l) - weight = 3 - label = l. - + weight = 3.0. diff --git a/test/expected-output-test/smoke_example/print-incremental.expected b/test/expected-output-test/smoke_example/print-incremental.expected index def3b989b..51a3a7443 100644 --- a/test/expected-output-test/smoke_example/print-incremental.expected +++ b/test/expected-output-test/smoke_example/print-incremental.expected @@ -76,6 +76,5 @@ dd_delta_cancer(pid) :- dd_new_smoke(pid), dd_delta_person_smokes(pid, l) weight = 3.0 - label = l semantics = Imply. diff --git a/test/expected-output-test/smoke_example/print.expected b/test/expected-output-test/smoke_example/print.expected index d3e06c9c8..48f4d4b4f 100644 --- a/test/expected-output-test/smoke_example/print.expected +++ b/test/expected-output-test/smoke_example/print.expected @@ -26,6 +26,5 @@ cancer(pid) :- smoke(pid), person_smokes(pid, l) weight = 3.0 - label = l semantics = Imply. diff --git a/test/expected-output-test/spouse_example/print-incremental.expected b/test/expected-output-test/spouse_example/print-incremental.expected index 0a053b167..edc1c0271 100644 --- a/test/expected-output-test/spouse_example/print-incremental.expected +++ b/test/expected-output-test/spouse_example/print-incremental.expected @@ -172,6 +172,5 @@ dd_delta_has_spouse(rid) :- dd_new_has_spouse_candidates(a, b, c, d, rid, l), dd_delta_has_spouse_features(rid, f) weight = f - label = l semantics = Imply. diff --git a/test/expected-output-test/spouse_example/print.expected b/test/expected-output-test/spouse_example/print.expected index b0260f557..55dbaeaf7 100644 --- a/test/expected-output-test/spouse_example/print.expected +++ b/test/expected-output-test/spouse_example/print.expected @@ -74,6 +74,5 @@ has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f - label = l semantics = Imply. 
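The grammar change in this commit means an inference rule now ends with a weight clause followed by an optional semantics clause; the supervision label no longer appears on inference rules at all. A self-contained sketch of just that tail of the grammar, with a hypothetical MiniRule result type (the real parser also consumes the conjunctive query and accepts non-identifier weights):

    import scala.util.parsing.combinator.JavaTokenParsers

    case class MiniRule(weight: String, semantics: String)

    object MiniRuleParser extends JavaTokenParsers {
      def weight: Parser[String]    = "weight" ~> "=" ~> ident
      def semantics: Parser[String] = "semantics" ~> "=" ~> ident
      // semantics is optional and defaults to Imply, as in the diff above
      def ruleTail: Parser[MiniRule] = weight ~ opt(semantics) ^^ {
        case w ~ s => MiniRule(w, s getOrElse "Imply")
      }
    }

    // MiniRuleParser.parseAll(MiniRuleParser.ruleTail,
    //   "weight = f semantics = Linear").get == MiniRule("f", "Linear")
    // MiniRuleParser.parseAll(MiniRuleParser.ruleTail,
    //   "weight = f").get == MiniRule("f", "Imply")
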
diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index 69b1e1425..09f9c2a25 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -365,7 +365,7 @@ SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_delta_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Imply(dd_delta_has_spouse.R0.label)" + function: "Linear(dd_delta_has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } diff --git a/test/expected-output-test/spouse_example_new_feature/input.ddl b/test/expected-output-test/spouse_example_new_feature/input.ddl index aff904191..4761caf9c 100644 --- a/test/expected-output-test/spouse_example_new_feature/input.ddl +++ b/test/expected-output-test/spouse_example_new_feature/input.ddl @@ -75,5 +75,4 @@ has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f -label = l -semantics = Imply. +semantics = Linear. diff --git a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected index 1ae89f64e..b18dc4339 100644 --- a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected @@ -173,6 +173,5 @@ dd_delta_has_spouse(rid) :- dd_new_has_spouse_candidates(a, b, c, d, rid, l), dd_delta_has_spouse_features(rid, f) weight = f - label = l - semantics = Imply. + semantics = Linear. From f9c874cd423ccb7b2cf6714cbc17668aa4750f0d Mon Sep 17 00:00:00 2001 From: senwu Date: Fri, 29 May 2015 21:05:00 -0700 Subject: [PATCH 129/347] correct column type in spouse example --- examples/spouse_example.ddl | 13 ++++---- .../compile-incremental.expected | 20 ++++++------- .../compile-materialization.expected | 10 +++---- .../spouse_example/compile.expected | 10 +++---- .../spouse_example/print-incremental.expected | 30 +++++++++---------- .../spouse_example/print.expected | 10 +++---- .../compile-incremental.expected | 20 ++++++------- .../spouse_example_new_feature/input.ddl | 10 +++---- .../print-incremental.expected | 30 +++++++++---------- 9 files changed, 77 insertions(+), 76 deletions(-) diff --git a/examples/spouse_example.ddl b/examples/spouse_example.ddl index 4df11a8af..e27850501 100644 --- a/examples/spouse_example.ddl +++ b/examples/spouse_example.ddl @@ -5,11 +5,11 @@ articles( sentences( document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text). @@ -73,4 +73,5 @@ has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l. has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) -weight = f. +weight = f +semantics = Imply. 
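The Imply to Linear switch above only changes the factor function name the compiler writes into deepdive.inference.factors; the head variable columns stay as they are. A hedged sketch of how such a function string can be assembled (hypothetical helper, not the compiler's actual code):

    // factorFunction("Linear", "dd_delta_has_spouse.R0.label")
    //   == "Linear(dd_delta_has_spouse.R0.label)"
    // factorFunction("Imply", "dd_delta_smoke.R1.label",
    //                "dd_delta_cancer.R0.label")
    //   == "Imply(dd_delta_smoke.R1.label, dd_delta_cancer.R0.label)"
    def factorFunction(semantics: String, headVars: String*): String =
      semantics + headVars.mkString("(", ", ", ")")
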
diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected index feaf6c418..3fdead7cc 100644 --- a/test/expected-output-test/spouse_example/compile-incremental.expected +++ b/test/expected-output-test/spouse_example/compile-incremental.expected @@ -93,11 +93,11 @@ CREATE TABLE dd_new_sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text, dd_count int) @@ -137,11 +137,11 @@ CREATE TABLE dd_delta_sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text, dd_count int) diff --git a/test/expected-output-test/spouse_example/compile-materialization.expected b/test/expected-output-test/spouse_example/compile-materialization.expected index 126d3cf68..2781a6e18 100644 --- a/test/expected-output-test/spouse_example/compile-materialization.expected +++ b/test/expected-output-test/spouse_example/compile-materialization.expected @@ -22,11 +22,11 @@ CREATE TABLE sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text, dd_count int) diff --git a/test/expected-output-test/spouse_example/compile.expected b/test/expected-output-test/spouse_example/compile.expected index 674f5d9f6..4ffb7f909 100644 --- a/test/expected-output-test/spouse_example/compile.expected +++ b/test/expected-output-test/spouse_example/compile.expected @@ -22,11 +22,11 @@ CREATE TABLE sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text) """ diff --git a/test/expected-output-test/spouse_example/print-incremental.expected b/test/expected-output-test/spouse_example/print-incremental.expected index edc1c0271..b3446d979 100644 --- a/test/expected-output-test/spouse_example/print-incremental.expected +++ b/test/expected-output-test/spouse_example/print-incremental.expected @@ -13,31 +13,31 @@ dd_new_articles(article_id, text) :- sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text). dd_delta_sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text). dd_new_sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text). 
diff --git a/test/expected-output-test/spouse_example/print.expected b/test/expected-output-test/spouse_example/print.expected index 55dbaeaf7..786573805 100644 --- a/test/expected-output-test/spouse_example/print.expected +++ b/test/expected-output-test/spouse_example/print.expected @@ -3,11 +3,11 @@ articles(article_id text, sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text). diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index 09f9c2a25..47ef761b5 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -93,11 +93,11 @@ CREATE TABLE dd_new_sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text, dd_count int) @@ -137,11 +137,11 @@ CREATE TABLE dd_delta_sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text, dd_count int) diff --git a/test/expected-output-test/spouse_example_new_feature/input.ddl b/test/expected-output-test/spouse_example_new_feature/input.ddl index 4761caf9c..e932cbf3d 100644 --- a/test/expected-output-test/spouse_example_new_feature/input.ddl +++ b/test/expected-output-test/spouse_example_new_feature/input.ddl @@ -5,11 +5,11 @@ articles( sentences( document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text). diff --git a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected index b18dc4339..7aeaacf7b 100644 --- a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected @@ -13,31 +13,31 @@ dd_new_articles(article_id, text) :- sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text). dd_delta_sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text). dd_new_sentences(document_id text, sentence text, - words text[], - lemma text[], - pos_tags text[], - dependencies text[], - ner_tags text[], + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, sentence_offset int, sentence_id text). 
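This commit flattens the array-typed sentence columns to plain text in the example, the test input, and every expected output. The patch does not state the motivation; a plausible reading, offered here as an assumption, is that extractors declared with "handles tsv lines" exchange rows as flat tab-separated strings, and plain text columns round-trip through that format without array quoting. A minimal sketch of the assumed encoding:

    // assumption: one row per line, columns joined by tabs, the shape a
    // "handles tsv lines" extractor would read and write
    def toTsvLine(row: Seq[String]): String = row.mkString("\t")
    def fromTsvLine(line: String): Seq[String] = line.split("\t", -1).toSeq
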
From e829e944cc7ab81264a42c7020e378c3433eeec4 Mon Sep 17 00:00:00 2001 From: senwu Date: Tue, 9 Jun 2015 15:40:28 -0700 Subject: [PATCH 130/347] clean up the udf directory in spouse example, add ${APP_HOME} prefix in compiler for udf directory --- examples/spouse_example.ddl | 6 +++--- src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala | 3 ++- .../spouse_example/compile-incremental.expected | 6 +++--- .../spouse_example/compile-materialization.expected | 6 +++--- test/expected-output-test/spouse_example/compile.expected | 6 +++--- .../spouse_example/print-incremental.expected | 6 +++--- test/expected-output-test/spouse_example/print.expected | 6 +++--- .../spouse_example_new_feature/compile-incremental.expected | 6 +++--- .../spouse_example_new_feature/input.ddl | 6 +++--- .../spouse_example_new_feature/print-incremental.expected | 6 +++--- 10 files changed, 29 insertions(+), 28 deletions(-) diff --git a/examples/spouse_example.ddl b/examples/spouse_example.ddl index e27850501..b6fd72021 100644 --- a/examples/spouse_example.ddl +++ b/examples/spouse_example.ddl @@ -42,7 +42,7 @@ ext_people_input(s, words, ner_tags) :- function ext_people over like ext_people_input returns like people_mentions - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines. + implementation "/udf/ext_people.py" handles tsv lines. has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input). @@ -53,7 +53,7 @@ ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like ext_has_spouse_input returns like has_spouse_candidates - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. + implementation "/udf/ext_has_spouse.py" handles tsv lines. has_spouse_features :- !ext_has_spouse_features(ext_has_spouse_features_input). @@ -66,7 +66,7 @@ ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. + implementation "/udf/ext_has_spouse_features.py" handles tsv lines. has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l. 
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 8a11cb126..1b2fce6d6 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -444,7 +444,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val function = ss.resolveFunctionName(stmt.function) val udfDetails = (function.implementations collectFirst { case impl: RowWiseLineHandler => - s"""udf: \"${StringEscapeUtils.escapeJava(impl.command)}\" + s"""udf: $${APP_HOME}\"${StringEscapeUtils.escapeJava(impl.command)}\" style: \"${impl.format}_extractor\" """ }) @@ -576,6 +576,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } } + // generate application.conf pipelines def compilePipelines(ss: CompilationState): CompiledBlocks = { val run = "deepdive.pipeline.run: ${PIPELINE}" val setup_database_pipeline = ((ss.schemaDeclarationGroupByHead map (_._2)).flatten map {s => ss.resolveExtractorBlockName(s)}).mkString(", ") diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected index 3fdead7cc..657fbcb45 100644 --- a/test/expected-output-test/spouse_example/compile-incremental.expected +++ b/test/expected-output-test/spouse_example/compile-incremental.expected @@ -328,7 +328,7 @@ input: """ SELECT * FROM dd_delta_ext_people_input """ output_relation: "dd_delta_people_mentions" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + udf: ${APP_HOME}"/udf/ext_people.py" style: "tsv_extractor" dependencies: [ "extraction_rule_24" ] } @@ -338,7 +338,7 @@ input: """ SELECT * FROM dd_delta_ext_has_spouse_features_input """ output_relation: "dd_delta_has_spouse_features" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + udf: ${APP_HOME}"/udf/ext_has_spouse_features.py" style: "tsv_extractor" dependencies: [ "extraction_rule_30" ] } @@ -348,7 +348,7 @@ input: """ SELECT * FROM dd_delta_ext_has_spouse_input """ output_relation: "dd_delta_has_spouse_candidates" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + udf: ${APP_HOME}"/udf/ext_has_spouse.py" style: "tsv_extractor" dependencies: [ "extraction_rule_27" ] } diff --git a/test/expected-output-test/spouse_example/compile-materialization.expected b/test/expected-output-test/spouse_example/compile-materialization.expected index 2781a6e18..7f90239da 100644 --- a/test/expected-output-test/spouse_example/compile-materialization.expected +++ b/test/expected-output-test/spouse_example/compile-materialization.expected @@ -144,7 +144,7 @@ input: """ SELECT * FROM ext_people_input """ output_relation: "people_mentions" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + udf: ${APP_HOME}"/udf/ext_people.py" style: "tsv_extractor" dependencies: [ "extraction_rule_7" ] } @@ -154,7 +154,7 @@ input: """ SELECT * FROM ext_has_spouse_features_input """ output_relation: "has_spouse_features" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + udf: ${APP_HOME}"/udf/ext_has_spouse_features.py" style: "tsv_extractor" dependencies: [ "extraction_rule_13" ] } @@ -164,7 +164,7 @@ input: """ SELECT * FROM ext_has_spouse_input """ output_relation: "has_spouse_candidates" - udf: 
"/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + udf: ${APP_HOME}"/udf/ext_has_spouse.py" style: "tsv_extractor" dependencies: [ "extraction_rule_10" ] } diff --git a/test/expected-output-test/spouse_example/compile.expected b/test/expected-output-test/spouse_example/compile.expected index 4ffb7f909..0a290eff3 100644 --- a/test/expected-output-test/spouse_example/compile.expected +++ b/test/expected-output-test/spouse_example/compile.expected @@ -138,7 +138,7 @@ input: """ SELECT * FROM ext_people_input """ output_relation: "people_mentions" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + udf: ${APP_HOME}"/udf/ext_people.py" style: "tsv_extractor" dependencies: [ "extraction_rule_7" ] } @@ -148,7 +148,7 @@ input: """ SELECT * FROM ext_has_spouse_features_input """ output_relation: "has_spouse_features" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + udf: ${APP_HOME}"/udf/ext_has_spouse_features.py" style: "tsv_extractor" dependencies: [ "extraction_rule_13" ] } @@ -158,7 +158,7 @@ input: """ SELECT * FROM ext_has_spouse_input """ output_relation: "has_spouse_candidates" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + udf: ${APP_HOME}"/udf/ext_has_spouse.py" style: "tsv_extractor" dependencies: [ "extraction_rule_10" ] } diff --git a/test/expected-output-test/spouse_example/print-incremental.expected b/test/expected-output-test/spouse_example/print-incremental.expected index b3446d979..aa6d787db 100644 --- a/test/expected-output-test/spouse_example/print-incremental.expected +++ b/test/expected-output-test/spouse_example/print-incremental.expected @@ -119,7 +119,7 @@ dd_delta_ext_people_input(s, words, ner_tags) :- function ext_people over like dd_delta_ext_people_input returns like dd_delta_people_mentions - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + implementation "/udf/ext_people.py" handles tsv lines. dd_delta_has_spouse_candidates :- !ext_has_spouse(dd_delta_ext_has_spouse_input). @@ -133,7 +133,7 @@ dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like dd_delta_ext_has_spouse_input returns like dd_delta_has_spouse_candidates - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + implementation "/udf/ext_has_spouse.py" handles tsv lines. dd_delta_has_spouse_features :- !ext_has_spouse_features(dd_delta_ext_has_spouse_features_input). @@ -159,7 +159,7 @@ dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) : function ext_has_spouse_features over like dd_delta_ext_has_spouse_features_input returns like dd_delta_has_spouse_features - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + implementation "/udf/ext_has_spouse_features.py" handles tsv lines. 
dd_delta_has_spouse(rid) :- diff --git a/test/expected-output-test/spouse_example/print.expected b/test/expected-output-test/spouse_example/print.expected index 786573805..0827f99cf 100644 --- a/test/expected-output-test/spouse_example/print.expected +++ b/test/expected-output-test/spouse_example/print.expected @@ -37,7 +37,7 @@ ext_people_input(s, words, ner_tags) :- function ext_people over like ext_people_input returns like people_mentions - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + implementation "/udf/ext_people.py" handles tsv lines. has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input). @@ -49,7 +49,7 @@ ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like ext_has_spouse_input returns like has_spouse_candidates - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + implementation "/udf/ext_has_spouse.py" handles tsv lines. has_spouse_features :- !ext_has_spouse_features(ext_has_spouse_features_input). @@ -63,7 +63,7 @@ ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + implementation "/udf/ext_has_spouse_features.py" handles tsv lines. has_spouse(rid) :- diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index 47ef761b5..7de09941f 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -331,7 +331,7 @@ input: """ SELECT * FROM dd_delta_ext_people_input """ output_relation: "dd_delta_people_mentions" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + udf: ${APP_HOME}"/udf/ext_people.py" style: "tsv_extractor" dependencies: [ "extraction_rule_24" ] } @@ -341,7 +341,7 @@ input: """ SELECT * FROM dd_delta_ext_has_spouse_features_input """ output_relation: "dd_delta_has_spouse_features" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + udf: ${APP_HOME}"/udf/ext_has_spouse_features.py" style: "tsv_extractor" dependencies: [ "extraction_rule_30" ] } @@ -351,7 +351,7 @@ input: """ SELECT * FROM dd_delta_ext_has_spouse_input """ output_relation: "dd_delta_has_spouse_candidates" - udf: "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + udf: ${APP_HOME}"/udf/ext_has_spouse.py" style: "tsv_extractor" dependencies: [ "extraction_rule_27" ] } diff --git a/test/expected-output-test/spouse_example_new_feature/input.ddl b/test/expected-output-test/spouse_example_new_feature/input.ddl index e932cbf3d..f5c1bf625 100644 --- a/test/expected-output-test/spouse_example_new_feature/input.ddl +++ b/test/expected-output-test/spouse_example_new_feature/input.ddl @@ -42,7 +42,7 @@ ext_people_input(s, words, ner_tags) :- function ext_people over like ext_people_input returns like people_mentions - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" handles tsv lines + implementation "/udf/ext_people.py" handles tsv lines mode = inc. 
has_spouse_candidates :- @@ -54,7 +54,7 @@ ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like ext_has_spouse_input returns like has_spouse_candidates - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" handles tsv lines. + implementation "/udf/ext_has_spouse.py" handles tsv lines. has_spouse_features :- !ext_has_spouse_features(ext_has_spouse_features_input). @@ -67,7 +67,7 @@ ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" handles tsv lines. + implementation "/udf/ext_has_spouse_features.py" handles tsv lines. has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l. diff --git a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected index 7aeaacf7b..3edd6b253 100644 --- a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected @@ -120,7 +120,7 @@ dd_delta_ext_people_input(s, words, ner_tags) :- function ext_people over like dd_delta_ext_people_input returns like dd_delta_people_mentions - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_people.py" + implementation "/udf/ext_people.py" handles tsv lines mode = inc. dd_delta_has_spouse_candidates :- !ext_has_spouse(dd_delta_ext_has_spouse_input). @@ -134,7 +134,7 @@ dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like dd_delta_ext_has_spouse_input returns like dd_delta_has_spouse_candidates - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse.py" + implementation "/udf/ext_has_spouse.py" handles tsv lines. dd_delta_has_spouse_features :- !ext_has_spouse_features(dd_delta_ext_has_spouse_features_input). @@ -160,7 +160,7 @@ dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) : function ext_has_spouse_features over like dd_delta_ext_has_spouse_features_input returns like dd_delta_has_spouse_features - implementation "/Users/feiran/workspace/release/deepdive/app/spouse_datalog/udf/ext_has_spouse_features.py" + implementation "/udf/ext_has_spouse_features.py" handles tsv lines. 
dd_delta_has_spouse(rid) :- From 9676719be8af0b762ea69cfd082db419233918ad Mon Sep 17 00:00:00 2001 From: senwu Date: Wed, 10 Jun 2015 12:06:45 -0700 Subject: [PATCH 131/347] add support for new inference rule --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 4 +- .../ddlog/DeepDiveLogDeltaDeriver.scala | 8 +- .../deepdive/ddlog/DeepDiveLogParser.scala | 10 +- .../compile-incremental.expected | 376 ++++++++++++++++++ .../spouse_example_new_inference/input.ddl | 78 ++++ .../print-incremental.expected | 178 +++++++++ .../print.expected | 78 ++++ 7 files changed, 722 insertions(+), 10 deletions(-) create mode 100644 test/expected-output-test/spouse_example_new_inference/compile-incremental.expected create mode 100644 test/expected-output-test/spouse_example_new_inference/input.ddl create mode 100644 test/expected-output-test/spouse_example_new_inference/print-incremental.expected create mode 100644 test/expected-output-test/spouse_example_new_inference/print.expected diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 1b2fce6d6..c78fa031a 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -124,7 +124,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C if (isQuery) variableTableNames += r } case ExtractionRule(_,_) => () - case InferenceRule(_,_,_) => () + case InferenceRule(_,_,_,_) => () case fdecl : FunctionDeclaration => function_schema += {fdecl.functionName -> fdecl} case FunctionCallRule(_,_,_) => () } @@ -597,7 +597,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var schema = Set[String]() // generate the statements. statements.foreach { - case InferenceRule(q, weights, rule) => + case InferenceRule(q, weights, rule, mode) => val qs = new QuerySchema(q) schema += s"${q.head.name}.label: Boolean" case _ => () diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index bd2b15738..cb24223d8 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -18,7 +18,7 @@ object DeepDiveLogDeltaDeriver{ case s: InferenceRule => transform(s) } - def transform(cq: ConjunctiveQuery): ConjunctiveQuery = { + def transform(cq: ConjunctiveQuery, mode: String): ConjunctiveQuery = { // New head val incCqHead = cq.head.copy( name = deltaPrefix + cq.head.name, @@ -43,7 +43,7 @@ object DeepDiveLogDeltaDeriver{ } var i = 0 var j = 0 - var index = if (incrementalFunctionInput contains incCqHead.name) -1 else 0 + var index = if ((incrementalFunctionInput contains incCqHead.name) || (mode == "inc")) -1 else 0 for (i <- index to (body.length - 1)) { var newBody = new ListBuffer[Atom]() for (j <- 0 to (body.length - 1)) { @@ -116,7 +116,7 @@ object DeepDiveLogDeltaDeriver{ // Incremental extraction rule, // create delta rules based on original extraction rule def transform(stmt: ExtractionRule): List[Statement] = { - List(ExtractionRule(transform(stmt.q), stmt.supervision)) + List(ExtractionRule(transform(stmt.q, null), stmt.supervision)) } // Incremental function call rule, @@ -128,7 +128,7 @@ object DeepDiveLogDeltaDeriver{ // Incremental inference rule, // create delta rules based on original extraction rule def transform(stmt: InferenceRule): List[Statement] = { - List(InferenceRule(transform(stmt.q), 
stmt.weights, stmt.semantics)) + List(InferenceRule(transform(stmt.q, stmt.mode), stmt.weights, stmt.semantics)) } def generateIncrementalFunctionInputList(program: DeepDiveLog.Program) { diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index d238dfa54..10cc1caac 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -39,7 +39,7 @@ case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statem case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration], mode: String = null) extends Statement case class ExtractionRule(q : ConjunctiveQuery, supervision: String = null) extends Statement // Extraction rule case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule -case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, semantics : String = "Imply") extends Statement // Weighted rule +case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, semantics : String = "Imply", mode: String = null) extends Statement // Weighted rule // Parser @@ -67,6 +67,7 @@ class DeepDiveLogParser extends JavaTokenParsers { def functionName = ident def semanticType = ident def functionModeType = ident + def inferenceModeType = ident def columnDeclaration: Parser[Column] = columnName ~ columnType ^^ { @@ -116,6 +117,7 @@ class DeepDiveLogParser extends JavaTokenParsers { ) def functionMode = "mode" ~> "=" ~> functionModeType + def inferenceMode = "mode" ~> "=" ~> inferenceModeType def functionImplementation : Parser[FunctionImplementationDeclaration] = "implementation" ~ stringLiteralAsString ~ "handles" ~ ("tsv" | "json") ~ "lines" ^^ { @@ -156,10 +158,10 @@ class DeepDiveLogParser extends JavaTokenParsers { def semantics = "semantics" ~> "=" ~> semanticType def inferenceRule : Parser[InferenceRule] = - ( conjunctiveQuery ~ factorWeight ~ opt(semantics) + ( conjunctiveQuery ~ factorWeight ~ opt(semantics) ~ opt(inferenceMode) ) ^^ { - case (q ~ weight ~ semantics) => - InferenceRule(q, weight, semantics.getOrElse("Imply")) + case (q ~ weight ~ semantics ~ mode) => + InferenceRule(q, weight, semantics.getOrElse("Imply"), mode.getOrElse(null)) } // rules or schema elements in arbitrary order diff --git a/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected b/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected new file mode 100644 index 000000000..56f8f4fd3 --- /dev/null +++ b/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected @@ -0,0 +1,376 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: INCREMENTAL + } + + + deepdive.schema.keys { + dd_delta_has_spouse : [relation_id] + dd_new_has_spouse : [relation_id] + } + + deepdive.schema.variables { + dd_delta_has_spouse.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; + CREATE TABLE + dd_delta_articles(article_id text, + text text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_10 { + 
sql: """ DROP TABLE IF EXISTS dd_new_people_mentions CASCADE; + CREATE TABLE + dd_new_people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_17 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; + CREATE TABLE + dd_delta_has_spouse_features(relation_id text, + feature text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS dd_new_articles CASCADE; + CREATE TABLE + dd_new_articles(article_id text, + text text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_14 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_candidates CASCADE; + CREATE TABLE + dd_new_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_18 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_features CASCADE; + CREATE TABLE + dd_new_has_spouse_features(relation_id text, + feature text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ DROP TABLE IF EXISTS dd_new_sentences CASCADE; + CREATE TABLE + dd_new_sentences(document_id text, + sentence text, + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, + sentence_offset int, + sentence_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_9 { + sql: """ DROP TABLE IF EXISTS dd_delta_people_mentions CASCADE; + CREATE TABLE + dd_delta_people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_candidates CASCADE; + CREATE TABLE + dd_delta_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS dd_delta_sentences CASCADE; + CREATE TABLE + dd_delta_sentences(document_id text, + sentence text, + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, + sentence_offset int, + sentence_id text, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_21 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE; + CREATE TABLE + dd_delta_has_spouse(relation_id text, + id bigint, + label boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_22 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse CASCADE; + CREATE TABLE + dd_new_has_spouse(relation_id text, + id bigint, + label boolean, + dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE dd_delta_articles; + TRUNCATE dd_new_people_mentions; + TRUNCATE dd_delta_has_spouse_features; + TRUNCATE dd_new_articles; + TRUNCATE dd_new_has_spouse_candidates; + TRUNCATE dd_new_has_spouse_features; + TRUNCATE dd_new_sentences; + TRUNCATE dd_delta_people_mentions; + TRUNCATE 
dd_delta_has_spouse_candidates; + TRUNCATE dd_delta_sentences; + TRUNCATE dd_delta_has_spouse; + TRUNCATE dd_new_has_spouse; + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_30 { + sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; + CREATE VIEW dd_delta_ext_has_spouse_features_input AS + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_23" , "extraction_rule_15" , "extraction_rule_26" , "extraction_rule_7" , "extraction_rule_11" ] + } + + + deepdive.extraction.extractors.extraction_rule_11 { + sql: """ TRUNCATE dd_new_people_mentions; + INSERT INTO dd_new_people_mentions + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + FROM people_mentions R0 + UNION + SELECT 
R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + FROM dd_delta_people_mentions R0 + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_23" ] + } + + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ TRUNCATE dd_new_articles; + INSERT INTO dd_new_articles + SELECT R0.article_id, R0.text, R0.dd_count + FROM articles R0 + UNION + SELECT R0.article_id, R0.text, R0.dd_count + FROM dd_delta_articles R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_24 { + sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; + CREATE VIEW dd_delta_ext_people_input AS + SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" + FROM dd_delta_sentences R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_15 { + sql: """ TRUNCATE dd_new_has_spouse_candidates; + INSERT INTO dd_new_has_spouse_candidates + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + FROM has_spouse_candidates R0 + UNION + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + FROM dd_delta_has_spouse_candidates R0 + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_26" ] + } + + + deepdive.extraction.extractors.extraction_rule_19 { + sql: """ TRUNCATE dd_new_has_spouse_features; + INSERT INTO dd_new_has_spouse_features + SELECT R0.relation_id, R0.feature, R0.dd_count + FROM has_spouse_features R0 + UNION + SELECT R0.relation_id, R0.feature, R0.dd_count + FROM dd_delta_has_spouse_features R0 + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_29" ] + } + + + deepdive.extraction.extractors.extraction_rule_7 { + sql: """ TRUNCATE dd_new_sentences; + INSERT INTO dd_new_sentences + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + FROM sentences R0 + UNION + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + FROM dd_delta_sentences R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_27 { + sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; + CREATE VIEW dd_delta_ext_has_spouse_input AS + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM dd_delta_people_mentions R0, people_mentions R1 + WHERE R1.sentence_id = R0.sentence_id UNION + SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 + WHERE R1.sentence_id = R0.sentence_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_23" , "extraction_rule_11" ] + } + + + deepdive.extraction.extractors.extraction_rule_32 { + sql: """ + INSERT INTO 
dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count + FROM dd_delta_has_spouse_candidates R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_26" ] + } + + + deepdive.extraction.extractors.extraction_rule_23 { + input: """ SELECT * FROM dd_delta_ext_people_input + """ + output_relation: "dd_delta_people_mentions" + udf: ${APP_HOME}"/udf/ext_people.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_24" ] + } + + + deepdive.extraction.extractors.extraction_rule_29 { + input: """ SELECT * FROM dd_delta_ext_has_spouse_features_input + """ + output_relation: "dd_delta_has_spouse_features" + udf: ${APP_HOME}"/udf/ext_has_spouse_features.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_30" ] + } + + + deepdive.extraction.extractors.extraction_rule_26 { + input: """ SELECT * FROM dd_delta_ext_has_spouse_input + """ + output_relation: "dd_delta_has_spouse_candidates" + udf: ${APP_HOME}"/udf/ext_has_spouse.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_27" ] + } + + + deepdive.inference.factors.dd_delta_has_spouse_0 { + input_query: """ + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION + SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_delta_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + function: "Linear(dd_delta_has_spouse.R0.label)" + weight: "?(has_spouse_features.R2.feature)" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_10, extraction_rule_17, extraction_rule_2, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_5, extraction_rule_21, extraction_rule_22] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_32, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15] +deepdive.pipeline.pipelines.inference: [dd_delta_has_spouse_0] +deepdive.pipeline.pipelines.cleanup: [cleanup] diff --git a/test/expected-output-test/spouse_example_new_inference/input.ddl b/test/expected-output-test/spouse_example_new_inference/input.ddl new file mode 100644 index 000000000..db34f0d16 --- /dev/null +++ b/test/expected-output-test/spouse_example_new_inference/input.ddl @@ -0,0 +1,78 @@ +articles( + article_id text, + text text). + +sentences( + document_id text, + sentence text, + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, + sentence_offset int, + sentence_id text). 
+ +people_mentions( + sentence_id text, + start_position int, + length int, + text text, + mention_id text). + +has_spouse_candidates( + person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). + +has_spouse_features( + relation_id text, + feature text). + +has_spouse?(relation_id text). + +people_mentions :- + !ext_people(ext_people_input). + +ext_people_input(s, words, ner_tags) :- + sentences(a, b, words, c, d, e, ner_tags, f, s). + +function ext_people over like ext_people_input + returns like people_mentions + implementation "/udf/ext_people.py" handles tsv lines. + +has_spouse_candidates :- + !ext_has_spouse(ext_has_spouse_input). + +ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + people_mentions(s, a, b, p1_text, p1_id), + people_mentions(s, c, d, p2_text, p2_id). + +function ext_has_spouse over like ext_has_spouse_input + returns like has_spouse_candidates + implementation "/udf/ext_has_spouse.py" handles tsv lines. + +has_spouse_features :- + !ext_has_spouse_features(ext_has_spouse_features_input). + +ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +function ext_has_spouse_features over like ext_has_spouse_features_input + returns like has_spouse_features + implementation "/udf/ext_has_spouse_features.py" handles tsv lines. + +has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l. + +has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), + has_spouse_features(rid, f) +weight = f +semantics = Linear +mode = inc. diff --git a/test/expected-output-test/spouse_example_new_inference/print-incremental.expected b/test/expected-output-test/spouse_example_new_inference/print-incremental.expected new file mode 100644 index 000000000..7619f98cd --- /dev/null +++ b/test/expected-output-test/spouse_example_new_inference/print-incremental.expected @@ -0,0 +1,178 @@ +articles(article_id text, + text text). + +dd_delta_articles(article_id text, + text text). + +dd_new_articles(article_id text, + text text). + +dd_new_articles(article_id, text) :- + articles(article_id, text); + dd_delta_articles(article_id, text). + +sentences(document_id text, + sentence text, + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, + sentence_offset int, + sentence_id text). + +dd_delta_sentences(document_id text, + sentence text, + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, + sentence_offset int, + sentence_id text). + +dd_new_sentences(document_id text, + sentence text, + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, + sentence_offset int, + sentence_id text). + +dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) :- + sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id); + dd_delta_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id). + +people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text). + +dd_delta_people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text). 
+ +dd_new_people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text). + +dd_new_people_mentions(sentence_id, start_position, length, text, mention_id) :- + people_mentions(sentence_id, start_position, length, text, mention_id); + dd_delta_people_mentions(sentence_id, start_position, length, text, mention_id). + +has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). + +dd_delta_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). + +dd_new_has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). + +dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true) :- + has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true); + dd_delta_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true). + +has_spouse_features(relation_id text, + feature text). + +dd_delta_has_spouse_features(relation_id text, + feature text). + +dd_new_has_spouse_features(relation_id text, + feature text). + +dd_new_has_spouse_features(relation_id, feature) :- + has_spouse_features(relation_id, feature); + dd_delta_has_spouse_features(relation_id, feature). + +has_spouse?(relation_id text). + +dd_delta_has_spouse?(relation_id text). + +dd_new_has_spouse?(relation_id text). + +dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). + +dd_delta_ext_people_input(s, words, ner_tags) :- + dd_delta_sentences(a, b, words, c, d, e, ner_tags, f, s). + +function ext_people + over like dd_delta_ext_people_input + returns like dd_delta_people_mentions + implementation "/udf/ext_people.py" + handles tsv lines. + +dd_delta_has_spouse_candidates :- !ext_has_spouse(dd_delta_ext_has_spouse_input). + +dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + dd_delta_people_mentions(s, a, b, p1_text, p1_id), + people_mentions(s, c, d, p2_text, p2_id); + dd_new_people_mentions(s, a, b, p1_text, p1_id), + dd_delta_people_mentions(s, c, d, p2_text, p2_id). + +function ext_has_spouse + over like dd_delta_ext_has_spouse_input + returns like dd_delta_has_spouse_candidates + implementation "/udf/ext_has_spouse.py" + handles tsv lines. + +dd_delta_has_spouse_features :- !ext_has_spouse_features(dd_delta_ext_has_spouse_features_input). + +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + dd_delta_sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id); + dd_new_sentences(a, b, words, c, d, e, f, g, s), + dd_delta_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id); + dd_new_sentences(a, b, words, c, d, e, f, g, s), + dd_new_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_delta_people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id); + dd_new_sentences(a, b, words, c, d, e, f, g, s), + dd_new_has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + dd_new_people_mentions(s, p1idx, p1len, k, person1_id), + dd_delta_people_mentions(s, p2idx, p2len, l, person2_id). 
+ +function ext_has_spouse_features + over like dd_delta_ext_has_spouse_features_input + returns like dd_delta_has_spouse_features + implementation "/udf/ext_has_spouse_features.py" + handles tsv lines. + +dd_delta_has_spouse(rid) :- + dd_delta_has_spouse_candidates(a, b, c, d, rid, l) + label = l. + +dd_delta_has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), + has_spouse_features(rid, f); + dd_delta_has_spouse_candidates(a, b, c, d, rid, l), + has_spouse_features(rid, f); + dd_new_has_spouse_candidates(a, b, c, d, rid, l), + dd_delta_has_spouse_features(rid, f) + weight = f + semantics = Linear. + diff --git a/test/expected-output-test/spouse_example_new_inference/print.expected b/test/expected-output-test/spouse_example_new_inference/print.expected new file mode 100644 index 000000000..abcb22144 --- /dev/null +++ b/test/expected-output-test/spouse_example_new_inference/print.expected @@ -0,0 +1,78 @@ +articles(article_id text, + text text). + +sentences(document_id text, + sentence text, + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, + sentence_offset int, + sentence_id text). + +people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text). + +has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean). + +has_spouse_features(relation_id text, + feature text). + +has_spouse?(relation_id text). + +people_mentions :- !ext_people(ext_people_input). + +ext_people_input(s, words, ner_tags) :- + sentences(a, b, words, c, d, e, ner_tags, f, s). + +function ext_people + over like ext_people_input + returns like people_mentions + implementation "/udf/ext_people.py" + handles tsv lines. + +has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input). + +ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- + people_mentions(s, a, b, p1_text, p1_id), + people_mentions(s, c, d, p2_text, p2_id). + +function ext_has_spouse + over like ext_has_spouse_input + returns like has_spouse_candidates + implementation "/udf/ext_has_spouse.py" + handles tsv lines. + +has_spouse_features :- !ext_has_spouse_features(ext_has_spouse_features_input). + +ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- + sentences(a, b, words, c, d, e, f, g, s), + has_spouse_candidates(person1_id, person2_id, s, h, rid, x), + people_mentions(s, p1idx, p1len, k, person1_id), + people_mentions(s, p2idx, p2len, l, person2_id). + +function ext_has_spouse_features + over like ext_has_spouse_features_input + returns like has_spouse_features + implementation "/udf/ext_has_spouse_features.py" + handles tsv lines. + +has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l) + label = l. + +has_spouse(rid) :- + has_spouse_candidates(a, b, c, d, rid, l), + has_spouse_features(rid, f) + weight = f + semantics = Linear. 
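The incremental programs above follow a uniform derivation: each relation R gains a dd_delta_R relation holding the newly arrived tuples and a dd_new_R relation materialized as R unioned with dd_delta_R (at this point in the series, only for the ordinary tables, not the query relations), and every rule body of k atoms is expanded into k alternative bodies, where the i-th alternative reads dd_new_ versions of the atoms before position i, the dd_delta_ version at position i, and the original snapshot after it. A minimal, self-contained Scala sketch of that expansion, assuming a simplified Atom that carries only a relation name and using deltaBodies as a made-up name (the real deriver also rewrites the relName inside each term):

    case class Atom(name: String)

    def deltaBodies(body: List[Atom]): List[List[Atom]] =
      body.indices.toList.map { i =>
        body.zipWithIndex.map { case (a, j) =>
          if (j < i) a.copy(name = "dd_new_" + a.name)         // already-updated relations
          else if (j == i) a.copy(name = "dd_delta_" + a.name) // the new tuples
          else a                                               // old snapshot
        }
      }

    // Applied to the two-atom body of ext_has_spouse_input, this reproduces the
    // two alternatives of dd_delta_ext_has_spouse_input shown above:
    //   deltaBodies(List(Atom("people_mentions"), Atom("people_mentions")))
    //   ==> List(dd_delta_people_mentions, people_mentions) and
    //       List(dd_new_people_mentions, dd_delta_people_mentions)

Taking the union over these alternatives keeps the delta sound: every new joined tuple uses at least one delta atom, and the mixed new/delta/old pattern avoids counting it twice.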
+ From 4d57821c2f67d2d833a871161b6d5ce39d2bc627 Mon Sep 17 00:00:00 2001 From: senwu Date: Fri, 12 Jun 2015 01:35:13 -0700 Subject: [PATCH 132/347] fix bugs --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 24 ++- .../ddlog/DeepDiveLogDeltaDeriver.scala | 59 ++++-- .../many_joins/print-incremental.expected | 4 + .../compile-incremental.expected | 70 +++++-- .../compile-materialization.expected | 9 +- .../smoke_example/compile.expected | 9 +- .../smoke_example/print-incremental.expected | 10 +- .../compile-incremental.expected | 68 ++++--- .../spouse_example/print-incremental.expected | 6 +- .../compile-incremental.expected | 68 ++++--- .../print-incremental.expected | 6 +- .../compile-incremental.expected | 81 +++++--- .../compile.expected | 189 ++++++++++++++++++ .../spouse_example_new_inference/input.ddl | 9 +- .../print-incremental.expected | 17 +- .../print.expected | 9 +- 16 files changed, 497 insertions(+), 141 deletions(-) create mode 100644 test/expected-output-test/spouse_example_new_inference/compile.expected diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index c78fa031a..acf4676e4 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -351,8 +351,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody)) // Generate the body of the query. val qs = new QuerySchema( tmpCq ) - if (ss.inferenceRuleGroupByHead contains stmt.q.head.name) { - if (stmt.supervision == null) ss.error(s"Cannot find supervision for variable ${stmt.q.head.name}.\n") + if (stmt.supervision != null) { if (stmt.q.bodies.length > 1) ss.error(s"Scoping rule does not allow disjunction.\n") val headTerms = tmpCq.head.terms map { case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" @@ -366,6 +365,16 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr} ${ ss.generateSQLBody(tmpCq) } """ + } else if ((ss.schemaDeclarationGroupByHead contains stmt.q.head.name) && (ss.schemaDeclarationGroupByHead(stmt.q.head.name)(0).isQuery) && (stmt.q.head.name startsWith "dd_new_")) { + val headTerms = tmpCq.head.terms map { + case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" + } + val headTermsStr = ( headTerms :+ "0 as id" ).mkString(", ") + val ddCount = if (ss.useDeltaCount) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" + val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else "" + inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, label ${ddCountStr} + ${ ss.generateSQLBody(tmpCq) } + """ } else { // variable columns // dd_new_ table only needs the original column names to make sure the schema is the same as the original table @@ -589,7 +598,11 @@ case INCREMENTAL => if (setup_database_pipeline.length > 0) s"deepdive.pipeline.pipelines.cleanup: [cleanup]" else "" case _ => "" } - List(run, initdb, extraction_pipeline, inference_pipeline, cleanup_pipeline).filter(_ != "") + val base_dir = ss.mode match { + case INCREMENTAL => "deepdive.pipeline.base_dir: ${BASEDIR}" + case _ => "" + } + List(run, initdb, extraction_pipeline, inference_pipeline, cleanup_pipeline, base_dir).filter(_ != "") } // generate variable schema statements @@ -597,9 +610,8 @@ object 
DeepDiveLogCompiler extends DeepDiveLogHandler { var schema = Set[String]() // generate the statements. statements.foreach { - case InferenceRule(q, weights, rule, mode) => - val qs = new QuerySchema(q) - schema += s"${q.head.name}.label: Boolean" + case SchemaDeclaration(a, isQuery) => + if (isQuery) schema += s"${a.name}.label: Boolean" case _ => () } val ddSchema = s""" diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index cb24223d8..16564f6fc 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -18,12 +18,20 @@ object DeepDiveLogDeltaDeriver{ case s: InferenceRule => transform(s) } - def transform(cq: ConjunctiveQuery, mode: String): ConjunctiveQuery = { + def transform(cq: ConjunctiveQuery, isInference: Boolean, mode: String): ConjunctiveQuery = { // New head - val incCqHead = cq.head.copy( - name = deltaPrefix + cq.head.name, - terms = cq.head.terms map {term => term.copy(relName = deltaPrefix + term.relName)} - ) + val incCqHead = if (isInference) { + cq.head.copy( + name = newPrefix + cq.head.name, + terms = cq.head.terms map {term => term.copy(relName = newPrefix + term.relName)} + ) + } else { + cq.head.copy( + name = deltaPrefix + cq.head.name, + terms = cq.head.terms map {term => term.copy(relName = deltaPrefix + term.relName)} + ) + } + var incCqBodies = new ListBuffer[List[Atom]]() // New incremental bodies for (body <- cq.bodies) { @@ -43,18 +51,22 @@ object DeepDiveLogDeltaDeriver{ } var i = 0 var j = 0 - var index = if ((incrementalFunctionInput contains incCqHead.name) || (mode == "inc")) -1 else 0 - for (i <- index to (body.length - 1)) { - var newBody = new ListBuffer[Atom]() - for (j <- 0 to (body.length - 1)) { - if (j > i) - newBody += body(j) - else if (j < i) - newBody += incNewBody(j) - else if (j == i) - newBody += incDeltaBody(j) + var index = if (incrementalFunctionInput contains incCqHead.name) -1 else 0 + if (mode == "inc") { + incCqBodies += incNewBody + } else { + for (i <- index to (body.length - 1)) { + var newBody = new ListBuffer[Atom]() + for (j <- 0 to (body.length - 1)) { + if (j > i) + newBody += body(j) + else if (j < i) + newBody += incNewBody(j) + else if (j == i) + newBody += incDeltaBody(j) + } + incCqBodies += newBody.toList } - incCqBodies += newBody.toList } } ConjunctiveQuery(incCqHead, incCqBodies.toList) @@ -87,10 +99,10 @@ object DeepDiveLogDeltaDeriver{ ) incrementalStatement += incNewStmt - if (!stmt.isQuery) { - incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(incNewStmt.a.name, incNewStmt.a.terms), - List(List(Atom(stmt.a.name, stmt.a.terms)), List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms))))) - } + // if (!stmt.isQuery) { + incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(incNewStmt.a.name, incNewStmt.a.terms), + List(List(Atom(stmt.a.name, stmt.a.terms)), List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms))))) + // } incrementalStatement.toList } @@ -116,7 +128,10 @@ object DeepDiveLogDeltaDeriver{ // Incremental extraction rule, // create delta rules based on original extraction rule def transform(stmt: ExtractionRule): List[Statement] = { - List(ExtractionRule(transform(stmt.q, null), stmt.supervision)) + // if (stmt.supervision != null) + // List(ExtractionRule(transform(stmt.q, true, null), stmt.supervision)) + // else + List(ExtractionRule(transform(stmt.q, false, null), stmt.supervision)) } // Incremental 
function call rule, @@ -128,7 +143,7 @@ object DeepDiveLogDeltaDeriver{ // Incremental inference rule, // create delta rules based on original extraction rule def transform(stmt: InferenceRule): List[Statement] = { - List(InferenceRule(transform(stmt.q, stmt.mode), stmt.weights, stmt.semantics)) + List(InferenceRule(transform(stmt.q, true, stmt.mode), stmt.weights, stmt.semantics)) } def generateIncrementalFunctionInputList(program: DeepDiveLog.Program) { diff --git a/test/expected-output-test/many_joins/print-incremental.expected b/test/expected-output-test/many_joins/print-incremental.expected index 1394144d5..6edb3d0f8 100644 --- a/test/expected-output-test/many_joins/print-incremental.expected +++ b/test/expected-output-test/many_joins/print-incremental.expected @@ -4,6 +4,10 @@ dd_delta_R?(x text). dd_new_R?(x text). +dd_new_R(x) :- + R(x); + dd_delta_R(x). + S(x text). dd_delta_S(x text). diff --git a/test/expected-output-test/smoke_example/compile-incremental.expected b/test/expected-output-test/smoke_example/compile-incremental.expected index a1eebd935..475317479 100644 --- a/test/expected-output-test/smoke_example/compile-incremental.expected +++ b/test/expected-output-test/smoke_example/compile-incremental.expected @@ -19,7 +19,12 @@ } deepdive.schema.variables { - dd_delta_cancer.label: Boolean + dd_new_smoke.label: Boolean +smoke.label: Boolean +cancer.label: Boolean +dd_delta_cancer.label: Boolean +dd_new_cancer.label: Boolean +dd_delta_smoke.label: Boolean } @@ -95,7 +100,7 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_20 { + deepdive.extraction.extractors.extraction_rule_21 { sql: """ DROP TABLE IF EXISTS dd_delta_cancer CASCADE; CREATE TABLE dd_delta_cancer(person_id bigint, @@ -106,7 +111,7 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_21 { + deepdive.extraction.extractors.extraction_rule_22 { sql: """ DROP TABLE IF EXISTS dd_new_cancer CASCADE; CREATE TABLE dd_new_cancer(person_id bigint, @@ -165,12 +170,12 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_22 { + deepdive.extraction.extractors.extraction_rule_24 { sql: """ - INSERT INTO dd_delta_smoke - SELECT R0.person_id AS "dd_delta_person_smokes.R0.person_id" , R0.dd_count AS "dd_count" - FROM dd_delta_person_smokes R0 + INSERT INTO dd_delta_smoke SELECT DISTINCT R0.person_id, 0 as id, R0.smokes AS label , R0.dd_count AS dd_count + FROM dd_delta_person_smokes R0 + """ style: "sql_extractor" @@ -207,7 +212,22 @@ } - deepdive.extraction.extractors.extraction_rule_23 { + deepdive.extraction.extractors.extraction_rule_19 { + sql: """ TRUNCATE dd_new_smoke; + INSERT INTO dd_new_smoke SELECT DISTINCT R0.person_id, 0 as id, label , R0.dd_count AS dd_count + FROM smoke R0 + + UNION SELECT DISTINCT R0.person_id, 0 as id, label , R0.dd_count AS dd_count + FROM dd_delta_smoke R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_24" ] + } + + + deepdive.extraction.extractors.extraction_rule_25 { sql: """ INSERT INTO dd_delta_cancer SELECT DISTINCT R0.person_id, 0 as id, R0.has_cancer AS label , R0.dd_count AS dd_count FROM dd_delta_person_has_cancer R0 @@ -219,6 +239,21 @@ } + deepdive.extraction.extractors.extraction_rule_23 { + sql: """ TRUNCATE dd_new_cancer; + INSERT INTO dd_new_cancer SELECT DISTINCT R0.person_id, 0 as id, label , R0.dd_count AS dd_count + FROM cancer R0 + + UNION SELECT DISTINCT R0.person_id, 0 as id, label , R0.dd_count AS dd_count + FROM dd_delta_cancer R0 + + + """ + style: "sql_extractor" + 
dependencies: [ "extraction_rule_25" ] + } + + deepdive.extraction.extractors.extraction_rule_11 { sql: """ TRUNCATE dd_new_person_smokes; INSERT INTO dd_new_person_smokes @@ -249,20 +284,21 @@ } - deepdive.inference.factors.dd_delta_cancer_0 { + deepdive.inference.factors.dd_new_cancer_0 { input_query: """ - SELECT R0.id AS "dd_delta_cancer.R0.id" , R1.id AS "dd_delta_smoke.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_cancer R0, dd_delta_smoke R1, person_smokes R2 + SELECT R0.id AS "dd_new_cancer.R0.id" , R1.id AS "dd_delta_smoke.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_new_cancer R0, dd_delta_smoke R1, person_smokes R2 WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id UNION - SELECT R0.id AS "dd_delta_cancer.R0.id" , R1.id AS "dd_new_smoke.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_cancer R0, dd_new_smoke R1, dd_delta_person_smokes R2 + SELECT R0.id AS "dd_new_cancer.R0.id" , R1.id AS "dd_new_smoke.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_new_cancer R0, dd_new_smoke R1, dd_delta_person_smokes R2 WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ - function: "Imply(dd_delta_smoke.R1.label, dd_delta_cancer.R0.label)" + function: "Imply(dd_delta_smoke.R1.label, dd_new_cancer.R0.label)" weight: "3.0" } deepdive.pipeline.run: ${PIPELINE} -deepdive.pipeline.pipelines.initdb: [extraction_rule_13, extraction_rule_17, extraction_rule_6, extraction_rule_1, extraction_rule_2, extraction_rule_5, extraction_rule_18, extraction_rule_20, extraction_rule_21, extraction_rule_9, extraction_rule_10, extraction_rule_14] -deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_22, extraction_rule_3, extraction_rule_23, extraction_rule_11, extraction_rule_15] -deepdive.pipeline.pipelines.inference: [dd_delta_cancer_0] +deepdive.pipeline.pipelines.initdb: [extraction_rule_13, extraction_rule_17, extraction_rule_6, extraction_rule_1, extraction_rule_2, extraction_rule_5, extraction_rule_18, extraction_rule_21, extraction_rule_22, extraction_rule_9, extraction_rule_10, extraction_rule_14] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_25, extraction_rule_19, extraction_rule_3, extraction_rule_23, extraction_rule_11, extraction_rule_24, extraction_rule_15] +deepdive.pipeline.pipelines.inference: [dd_new_cancer_0] deepdive.pipeline.pipelines.cleanup: [cleanup] +deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/smoke_example/compile-materialization.expected b/test/expected-output-test/smoke_example/compile-materialization.expected index 82710e1cd..6296be46d 100644 --- a/test/expected-output-test/smoke_example/compile-materialization.expected +++ b/test/expected-output-test/smoke_example/compile-materialization.expected @@ -13,7 +13,8 @@ deepdive.schema.variables { - cancer.label: Boolean + smoke.label: Boolean +cancer.label: Boolean } @@ -93,10 +94,10 @@ deepdive.extraction.extractors.extraction_rule_6 { sql: """ - INSERT INTO smoke - SELECT R0.person_id AS "person_smokes.R0.person_id" , R0.dd_count AS "dd_count" - FROM person_smokes R0 + INSERT INTO smoke SELECT DISTINCT R0.person_id, 0 as id, R0.smokes AS label , R0.dd_count AS dd_count + FROM person_smokes R0 + """ style: "sql_extractor" diff --git a/test/expected-output-test/smoke_example/compile.expected b/test/expected-output-test/smoke_example/compile.expected index 6d4c87891..16c907851 100644 --- 
a/test/expected-output-test/smoke_example/compile.expected +++ b/test/expected-output-test/smoke_example/compile.expected @@ -13,7 +13,8 @@ deepdive.schema.variables { - cancer.label: Boolean + smoke.label: Boolean +cancer.label: Boolean } @@ -87,10 +88,10 @@ deepdive.extraction.extractors.extraction_rule_6 { sql: """ - INSERT INTO smoke - SELECT R0.person_id AS "person_smokes.R0.person_id" - FROM person_smokes R0 + INSERT INTO smoke SELECT DISTINCT R0.person_id, 0 as id, R0.smokes AS label + FROM person_smokes R0 + """ style: "sql_extractor" diff --git a/test/expected-output-test/smoke_example/print-incremental.expected b/test/expected-output-test/smoke_example/print-incremental.expected index 51a3a7443..5046bbaf3 100644 --- a/test/expected-output-test/smoke_example/print-incremental.expected +++ b/test/expected-output-test/smoke_example/print-incremental.expected @@ -56,12 +56,20 @@ dd_delta_smoke?(person_id bigint). dd_new_smoke?(person_id bigint). +dd_new_smoke(person_id) :- + smoke(person_id); + dd_delta_smoke(person_id). + cancer?(person_id bigint). dd_delta_cancer?(person_id bigint). dd_new_cancer?(person_id bigint). +dd_new_cancer(person_id) :- + cancer(person_id); + dd_delta_cancer(person_id). + dd_delta_smoke(pid) :- dd_delta_person_smokes(pid, l) label = l. @@ -70,7 +78,7 @@ dd_delta_cancer(pid) :- dd_delta_person_has_cancer(pid, l) label = l. -dd_delta_cancer(pid) :- +dd_new_cancer(pid) :- dd_delta_smoke(pid), person_smokes(pid, l); dd_new_smoke(pid), diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected index 657fbcb45..439b69a1b 100644 --- a/test/expected-output-test/spouse_example/compile-incremental.expected +++ b/test/expected-output-test/spouse_example/compile-incremental.expected @@ -17,7 +17,9 @@ } deepdive.schema.variables { - dd_delta_has_spouse.label: Boolean + has_spouse.label: Boolean +dd_delta_has_spouse.label: Boolean +dd_new_has_spouse.label: Boolean } @@ -189,7 +191,7 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_30 { + deepdive.extraction.extractors.extraction_rule_31 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" @@ -206,7 +208,7 @@ WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_23" , "extraction_rule_15" , "extraction_rule_26" , "extraction_rule_7" , "extraction_rule_11" ] + dependencies: [ "extraction_rule_15" , "extraction_rule_27" , "extraction_rule_24" , "extraction_rule_7" , "extraction_rule_11" ] } @@ -221,7 +223,7 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_23" ] + dependencies: [ "extraction_rule_24" ] } @@ -240,7 +242,7 @@ } - deepdive.extraction.extractors.extraction_rule_24 { + deepdive.extraction.extractors.extraction_rule_25 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS SELECT R0.sentence_id AS 
"dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" @@ -263,7 +265,7 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_26" ] + dependencies: [ "extraction_rule_27" ] } @@ -278,7 +280,7 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_29" ] + dependencies: [ "extraction_rule_30" ] } @@ -297,7 +299,7 @@ } - deepdive.extraction.extractors.extraction_rule_27 { + deepdive.extraction.extractors.extraction_rule_28 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" @@ -308,11 +310,11 @@ WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_23" , "extraction_rule_11" ] + dependencies: [ "extraction_rule_24" , "extraction_rule_11" ] } - deepdive.extraction.extractors.extraction_rule_32 { + deepdive.extraction.extractors.extraction_rule_33 { sql: """ INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count FROM dd_delta_has_spouse_candidates R0 @@ -320,54 +322,70 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_26" ] + dependencies: [ "extraction_rule_27" ] } - deepdive.extraction.extractors.extraction_rule_23 { + deepdive.extraction.extractors.extraction_rule_23 { + sql: """ TRUNCATE dd_new_has_spouse; + INSERT INTO dd_new_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, label , R0.dd_count AS dd_count + FROM has_spouse R0 + + UNION SELECT DISTINCT R0.relation_id, 0 as id, label , R0.dd_count AS dd_count + FROM dd_delta_has_spouse R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_33" ] + } + + + deepdive.extraction.extractors.extraction_rule_24 { input: """ SELECT * FROM dd_delta_ext_people_input """ output_relation: "dd_delta_people_mentions" udf: ${APP_HOME}"/udf/ext_people.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_24" ] + dependencies: [ "extraction_rule_25" ] } - deepdive.extraction.extractors.extraction_rule_29 { + deepdive.extraction.extractors.extraction_rule_30 { input: """ SELECT * FROM dd_delta_ext_has_spouse_features_input """ output_relation: "dd_delta_has_spouse_features" udf: ${APP_HOME}"/udf/ext_has_spouse_features.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_30" ] + dependencies: [ "extraction_rule_31" ] } - deepdive.extraction.extractors.extraction_rule_26 { + deepdive.extraction.extractors.extraction_rule_27 { input: """ SELECT * FROM dd_delta_ext_has_spouse_input """ output_relation: "dd_delta_has_spouse_candidates" udf: ${APP_HOME}"/udf/ext_has_spouse.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_27" ] + dependencies: [ "extraction_rule_28" ] } - deepdive.inference.factors.dd_delta_has_spouse_0 { + deepdive.inference.factors.dd_new_has_spouse_0 { input_query: """ - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" 
, R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_new_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_new_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Imply(dd_delta_has_spouse.R0.label)" + function: "Imply(dd_new_has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_10, extraction_rule_17, extraction_rule_2, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_5, extraction_rule_21, extraction_rule_22] -deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_32, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15] -deepdive.pipeline.pipelines.inference: [dd_delta_has_spouse_0] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_28, extraction_rule_25, extraction_rule_31, extraction_rule_19, extraction_rule_3, extraction_rule_33, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_15] +deepdive.pipeline.pipelines.inference: [dd_new_has_spouse_0] deepdive.pipeline.pipelines.cleanup: [cleanup] +deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/spouse_example/print-incremental.expected b/test/expected-output-test/spouse_example/print-incremental.expected index aa6d787db..d552faf9f 100644 --- a/test/expected-output-test/spouse_example/print-incremental.expected +++ b/test/expected-output-test/spouse_example/print-incremental.expected @@ -111,6 +111,10 @@ dd_delta_has_spouse?(relation_id text). dd_new_has_spouse?(relation_id text). +dd_new_has_spouse(relation_id) :- + has_spouse(relation_id); + dd_delta_has_spouse(relation_id). + dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). dd_delta_ext_people_input(s, words, ner_tags) :- @@ -166,7 +170,7 @@ dd_delta_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l) label = l. 
-dd_delta_has_spouse(rid) :- +dd_new_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f); dd_new_has_spouse_candidates(a, b, c, d, rid, l), diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index 7de09941f..52adc16f4 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -17,7 +17,9 @@ } deepdive.schema.variables { - dd_delta_has_spouse.label: Boolean + has_spouse.label: Boolean +dd_delta_has_spouse.label: Boolean +dd_new_has_spouse.label: Boolean } @@ -189,7 +191,7 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_30 { + deepdive.extraction.extractors.extraction_rule_31 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" @@ -206,7 +208,7 @@ WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_23" , "extraction_rule_15" , "extraction_rule_26" , "extraction_rule_7" , "extraction_rule_11" ] + dependencies: [ "extraction_rule_15" , "extraction_rule_27" , "extraction_rule_24" , "extraction_rule_7" , "extraction_rule_11" ] } @@ -221,7 +223,7 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_23" ] + dependencies: [ "extraction_rule_24" ] } @@ -240,7 +242,7 @@ } - deepdive.extraction.extractors.extraction_rule_24 { + deepdive.extraction.extractors.extraction_rule_25 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" , R0.dd_count AS "dd_count" @@ -266,7 +268,7 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_26" ] + dependencies: [ "extraction_rule_27" ] } @@ -281,7 +283,7 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_29" ] + dependencies: [ "extraction_rule_30" ] } @@ -300,7 +302,7 @@ } - deepdive.extraction.extractors.extraction_rule_27 { + deepdive.extraction.extractors.extraction_rule_28 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" @@ -311,11 +313,11 @@ WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_23" , "extraction_rule_11" ] + dependencies: [ "extraction_rule_24" , "extraction_rule_11" ] } - deepdive.extraction.extractors.extraction_rule_32 { + deepdive.extraction.extractors.extraction_rule_33 { 
sql: """ INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count FROM dd_delta_has_spouse_candidates R0 @@ -323,54 +325,70 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_26" ] + dependencies: [ "extraction_rule_27" ] } - deepdive.extraction.extractors.extraction_rule_23 { + deepdive.extraction.extractors.extraction_rule_23 { + sql: """ TRUNCATE dd_new_has_spouse; + INSERT INTO dd_new_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, label , R0.dd_count AS dd_count + FROM has_spouse R0 + + UNION SELECT DISTINCT R0.relation_id, 0 as id, label , R0.dd_count AS dd_count + FROM dd_delta_has_spouse R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_33" ] + } + + + deepdive.extraction.extractors.extraction_rule_24 { input: """ SELECT * FROM dd_delta_ext_people_input """ output_relation: "dd_delta_people_mentions" udf: ${APP_HOME}"/udf/ext_people.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_24" ] + dependencies: [ "extraction_rule_25" ] } - deepdive.extraction.extractors.extraction_rule_29 { + deepdive.extraction.extractors.extraction_rule_30 { input: """ SELECT * FROM dd_delta_ext_has_spouse_features_input """ output_relation: "dd_delta_has_spouse_features" udf: ${APP_HOME}"/udf/ext_has_spouse_features.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_30" ] + dependencies: [ "extraction_rule_31" ] } - deepdive.extraction.extractors.extraction_rule_26 { + deepdive.extraction.extractors.extraction_rule_27 { input: """ SELECT * FROM dd_delta_ext_has_spouse_input """ output_relation: "dd_delta_has_spouse_candidates" udf: ${APP_HOME}"/udf/ext_has_spouse.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_27" ] + dependencies: [ "extraction_rule_28" ] } - deepdive.inference.factors.dd_delta_has_spouse_0 { + deepdive.inference.factors.dd_new_has_spouse_0 { input_query: """ - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_new_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_new_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Linear(dd_delta_has_spouse.R0.label)" + function: "Linear(dd_new_has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_10, extraction_rule_17, extraction_rule_2, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_5, extraction_rule_21, extraction_rule_22] 
-deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_32, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15] -deepdive.pipeline.pipelines.inference: [dd_delta_has_spouse_0] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_28, extraction_rule_25, extraction_rule_31, extraction_rule_19, extraction_rule_3, extraction_rule_33, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_15] +deepdive.pipeline.pipelines.inference: [dd_new_has_spouse_0] deepdive.pipeline.pipelines.cleanup: [cleanup] +deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected index 3edd6b253..edc9e85c5 100644 --- a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected @@ -111,6 +111,10 @@ dd_delta_has_spouse?(relation_id text). dd_new_has_spouse?(relation_id text). +dd_new_has_spouse(relation_id) :- + has_spouse(relation_id); + dd_delta_has_spouse(relation_id). + dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). dd_delta_ext_people_input(s, words, ner_tags) :- @@ -167,7 +171,7 @@ dd_delta_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l) label = l. -dd_delta_has_spouse(rid) :- +dd_new_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f); dd_new_has_spouse_candidates(a, b, c, d, rid, l), diff --git a/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected b/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected index 56f8f4fd3..c3391a893 100644 --- a/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected @@ -17,7 +17,9 @@ } deepdive.schema.variables { - dd_delta_has_spouse.label: Boolean + has_spouse.label: Boolean +dd_delta_has_spouse.label: Boolean +dd_new_has_spouse.label: Boolean } @@ -189,7 +191,7 @@ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_30 { + deepdive.extraction.extractors.extraction_rule_31 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" @@ -206,7 +208,7 @@ WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_23" , "extraction_rule_15" , "extraction_rule_26" , "extraction_rule_7" , "extraction_rule_11" ] + dependencies: [ "extraction_rule_15" , "extraction_rule_27" , "extraction_rule_24" , "extraction_rule_7" , "extraction_rule_11" ] } @@ -221,7 +223,7 @@ """ style: 
"sql_extractor" - dependencies: [ "extraction_rule_23" ] + dependencies: [ "extraction_rule_24" ] } @@ -240,7 +242,7 @@ } - deepdive.extraction.extractors.extraction_rule_24 { + deepdive.extraction.extractors.extraction_rule_25 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" @@ -263,7 +265,7 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_26" ] + dependencies: [ "extraction_rule_27" ] } @@ -278,7 +280,7 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_29" ] + dependencies: [ "extraction_rule_30" ] } @@ -297,7 +299,7 @@ } - deepdive.extraction.extractors.extraction_rule_27 { + deepdive.extraction.extractors.extraction_rule_28 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" @@ -308,11 +310,11 @@ WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" - dependencies: [ "extraction_rule_23" , "extraction_rule_11" ] + dependencies: [ "extraction_rule_24" , "extraction_rule_11" ] } - deepdive.extraction.extractors.extraction_rule_32 { + deepdive.extraction.extractors.extraction_rule_33 { sql: """ INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count FROM dd_delta_has_spouse_candidates R0 @@ -320,57 +322,80 @@ """ style: "sql_extractor" - dependencies: [ "extraction_rule_26" ] + dependencies: [ "extraction_rule_27" ] + } + + + deepdive.extraction.extractors.extraction_rule_23 { + sql: """ TRUNCATE dd_new_has_spouse; + INSERT INTO dd_new_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, label , R0.dd_count AS dd_count + FROM has_spouse R0 + + UNION SELECT DISTINCT R0.relation_id, 0 as id, label , R0.dd_count AS dd_count + FROM dd_delta_has_spouse R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_33" ] } - deepdive.extraction.extractors.extraction_rule_23 { + deepdive.extraction.extractors.extraction_rule_24 { input: """ SELECT * FROM dd_delta_ext_people_input """ output_relation: "dd_delta_people_mentions" udf: ${APP_HOME}"/udf/ext_people.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_24" ] + dependencies: [ "extraction_rule_25" ] } - deepdive.extraction.extractors.extraction_rule_29 { + deepdive.extraction.extractors.extraction_rule_30 { input: """ SELECT * FROM dd_delta_ext_has_spouse_features_input """ output_relation: "dd_delta_has_spouse_features" udf: ${APP_HOME}"/udf/ext_has_spouse_features.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_30" ] + dependencies: [ "extraction_rule_31" ] } - deepdive.extraction.extractors.extraction_rule_26 { + deepdive.extraction.extractors.extraction_rule_27 { input: """ SELECT * FROM dd_delta_ext_has_spouse_input """ output_relation: "dd_delta_has_spouse_candidates" udf: ${APP_HOME}"/udf/ext_has_spouse.py" style: "tsv_extractor" - dependencies: [ "extraction_rule_27" ] + dependencies: [ "extraction_rule_28" ] } - deepdive.inference.factors.dd_delta_has_spouse_0 { + deepdive.inference.factors.dd_new_has_spouse_0 
{ input_query: """ - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_new_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION - SELECT R0.id AS "dd_delta_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" - FROM dd_delta_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + FROM dd_new_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ - function: "Linear(dd_delta_has_spouse.R0.label)" + function: "Imply(dd_new_has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } + + deepdive.inference.factors.dd_new_has_spouse_1 { + input_query: """ + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R1.id AS "dd_new_has_spouse.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + FROM dd_new_has_spouse R0, dd_new_has_spouse R1, dd_new_has_spouse_candidates R2, dd_new_has_spouse_candidates R3 + WHERE R2.relation_id = R0.relation_id AND R3.person1_id = R2.person2_id AND R3.person2_id = R2.person1_id AND R3.relation_id = R1.relation_id """ + function: "Imply(dd_new_has_spouse.R1.label, dd_new_has_spouse.R0.label)" + weight: "3.0" + } + deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_10, extraction_rule_17, extraction_rule_2, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_5, extraction_rule_21, extraction_rule_22] -deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_19, extraction_rule_32, extraction_rule_3, extraction_rule_26, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_29, extraction_rule_15] -deepdive.pipeline.pipelines.inference: [dd_delta_has_spouse_0] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_28, extraction_rule_25, extraction_rule_31, extraction_rule_19, extraction_rule_3, extraction_rule_33, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_15] +deepdive.pipeline.pipelines.inference: [dd_new_has_spouse_0, dd_new_has_spouse_1] deepdive.pipeline.pipelines.cleanup: [cleanup] +deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/spouse_example_new_inference/compile.expected b/test/expected-output-test/spouse_example_new_inference/compile.expected new file mode 100644 index 000000000..981f3cee2 --- /dev/null +++ 
b/test/expected-output-test/spouse_example_new_inference/compile.expected @@ -0,0 +1,189 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: ORIGINAL + } + + + + deepdive.schema.variables { + has_spouse.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS sentences CASCADE; + CREATE TABLE + sentences(document_id text, + sentence text, + words text, + lemma text, + pos_tags text, + dependencies text, + ner_tags text, + sentence_offset int, + sentence_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS has_spouse_candidates CASCADE; + CREATE TABLE + has_spouse_candidates(person1_id text, + person2_id text, + sentence_id text, + description text, + relation_id text, + is_true boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; + CREATE TABLE + has_spouse(relation_id text, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS articles CASCADE; + CREATE TABLE + articles(article_id text, + text text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE; + CREATE TABLE + has_spouse_features(relation_id text, + feature text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS people_mentions CASCADE; + CREATE TABLE + people_mentions(sentence_id text, + start_position int, + length int, + text text, + mention_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_15 { + sql: """ + INSERT INTO has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label + FROM has_spouse_candidates R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_9" ] + } + + + deepdive.extraction.extractors.extraction_rule_7 { + sql: """ DROP VIEW IF EXISTS ext_people_input; + CREATE VIEW ext_people_input AS + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" + FROM sentences R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; + CREATE VIEW ext_has_spouse_features_input AS + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_9" , "extraction_rule_6" ] + } + + + deepdive.extraction.extractors.extraction_rule_10 { + sql: """ DROP VIEW IF EXISTS ext_has_spouse_input; + CREATE VIEW ext_has_spouse_input AS + SELECT R0.sentence_id 
AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" + FROM people_mentions R0, people_mentions R1 + WHERE R1.sentence_id = R0.sentence_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_6" ] + } + + + deepdive.extraction.extractors.extraction_rule_6 { + input: """ SELECT * FROM ext_people_input + """ + output_relation: "people_mentions" + udf: ${APP_HOME}"/udf/ext_people.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_7" ] + } + + + deepdive.extraction.extractors.extraction_rule_12 { + input: """ SELECT * FROM ext_has_spouse_features_input + """ + output_relation: "has_spouse_features" + udf: ${APP_HOME}"/udf/ext_has_spouse_features.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_13" ] + } + + + deepdive.extraction.extractors.extraction_rule_9 { + input: """ SELECT * FROM ext_has_spouse_input + """ + output_relation: "has_spouse_candidates" + udf: ${APP_HOME}"/udf/ext_has_spouse.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_10" ] + } + + + deepdive.inference.factors.has_spouse_0 { + input_query: """ + SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" + FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + function: "Imply(has_spouse.R0.label)" + weight: "?(has_spouse_features.R2.feature)" + } + + + deepdive.inference.factors.has_spouse_1 { + input_query: """ + SELECT R0.id AS "has_spouse.R0.id" , R1.id AS "has_spouse.R1.id" + FROM has_spouse R0, has_spouse R1, has_spouse_candidates R2, has_spouse_candidates R3 + WHERE R2.relation_id = R0.relation_id AND R3.person1_id = R2.person2_id AND R3.person2_id = R2.person1_id AND R3.relation_id = R1.relation_id """ + function: "Imply(has_spouse.R1.label, has_spouse.R0.label)" + weight: "3.0" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_3, extraction_rule_5, extraction_rule_0, extraction_rule_4, extraction_rule_2] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_15, extraction_rule_6] +deepdive.pipeline.pipelines.inference: [has_spouse_0, has_spouse_1] diff --git a/test/expected-output-test/spouse_example_new_inference/input.ddl b/test/expected-output-test/spouse_example_new_inference/input.ddl index db34f0d16..e6eb50fce 100644 --- a/test/expected-output-test/spouse_example_new_inference/input.ddl +++ b/test/expected-output-test/spouse_example_new_inference/input.ddl @@ -73,6 +73,11 @@ has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l. has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) -weight = f -semantics = Linear +weight = f. + +has_spouse(rid) :- + has_spouse(rid2), + has_spouse_candidates(a1, b1, c1, d1, rid, l1), + has_spouse_candidates(b1, a1, c2, d2, rid2, l2) +weight = 3.0 mode = inc. 
diff --git a/test/expected-output-test/spouse_example_new_inference/print-incremental.expected b/test/expected-output-test/spouse_example_new_inference/print-incremental.expected index 7619f98cd..baab6b961 100644 --- a/test/expected-output-test/spouse_example_new_inference/print-incremental.expected +++ b/test/expected-output-test/spouse_example_new_inference/print-incremental.expected @@ -111,6 +111,10 @@ dd_delta_has_spouse?(relation_id text). dd_new_has_spouse?(relation_id text). +dd_new_has_spouse(relation_id) :- + has_spouse(relation_id); + dd_delta_has_spouse(relation_id). + dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). dd_delta_ext_people_input(s, words, ner_tags) :- @@ -166,13 +170,18 @@ dd_delta_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l) label = l. -dd_delta_has_spouse(rid) :- - has_spouse_candidates(a, b, c, d, rid, l), - has_spouse_features(rid, f); +dd_new_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f); dd_new_has_spouse_candidates(a, b, c, d, rid, l), dd_delta_has_spouse_features(rid, f) weight = f - semantics = Linear. + semantics = Imply. + +dd_new_has_spouse(rid) :- + dd_new_has_spouse(rid2), + dd_new_has_spouse_candidates(a1, b1, c1, d1, rid, l1), + dd_new_has_spouse_candidates(b1, a1, c2, d2, rid2, l2) + weight = 3.0 + semantics = Imply. diff --git a/test/expected-output-test/spouse_example_new_inference/print.expected b/test/expected-output-test/spouse_example_new_inference/print.expected index abcb22144..4e8b107bd 100644 --- a/test/expected-output-test/spouse_example_new_inference/print.expected +++ b/test/expected-output-test/spouse_example_new_inference/print.expected @@ -74,5 +74,12 @@ has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f - semantics = Linear. + semantics = Imply. + +has_spouse(rid) :- + has_spouse(rid2), + has_spouse_candidates(a1, b1, c1, d1, rid, l1), + has_spouse_candidates(b1, a1, c2, d2, rid2, l2) + weight = 3.0 + semantics = Imply. 
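The print-incremental output above follows the standard delta-rule decomposition for incremental maintenance: dd_new_has_spouse is defined as the old has_spouse contents unioned with dd_delta_has_spouse, and the two-atom feature rule is expanded so that each disjunct touches exactly one delta relation (delta candidates against the old features, then the new candidates against the delta features). A hedged sketch of that identity in SQL, with r, s, and k as placeholder relation and key names rather than anything the compiler emits:

    -- changed(R JOIN S) = (delta_R JOIN old_S) UNION ALL (new_R JOIN delta_S),
    -- where new_R = old_R UNION delta_R; all names here are placeholders.
    SELECT dr.k FROM dd_delta_r dr, s_old s   WHERE dr.k = s.k
    UNION ALL
    SELECT nr.k FROM r_new nr, dd_delta_s ds  WHERE nr.k = ds.k;

Assuming delta tuples are disjoint from the old contents, this union visits every derivation that involves at least one changed tuple exactly once, which is why the rewritten rule above needs only two disjuncts instead of enumerating all four old/delta combinations.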
From abeb61b389727b067cd4c5ae04414b4850165b21 Mon Sep 17 00:00:00 2001 From: senwu Date: Fri, 12 Jun 2015 04:37:45 -0700 Subject: [PATCH 133/347] fix bugs, use view for dd_new_ variable tables --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 45 ++-- .../compile-incremental.expected | 186 ++++++++--------- .../compile-materialization.expected | 91 ++++---- .../smoke_example/compile.expected | 90 ++++---- .../compile-incremental.expected | 192 ++++++++--------- .../compile-materialization.expected | 101 +++++---- .../spouse_example/compile.expected | 100 +++++---- .../compile-incremental.expected | 194 ++++++++---------- .../compile-incremental.expected | 192 ++++++++--------- .../compile.expected | 100 +++++---- 10 files changed, 654 insertions(+), 637 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index acf4676e4..e2b1dc3e7 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -313,24 +313,27 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { def compileSchemaDeclarations(stmts: List[SchemaDeclaration], ss: CompilationState): CompiledBlocks = { var schemas = new ListBuffer[String]() for (stmt <- stmts) { - var columnDecls = stmt.a.terms map { - case Variable(name, _, i) => s"${name} ${stmt.a.types(i)}" + if ((stmt.a.name startsWith "dd_new_") && (ss.inferenceRuleGroupByHead contains stmt.a.name)) { + } else { + var columnDecls = stmt.a.terms map { + case Variable(name, _, i) => s"${name} ${stmt.a.types(i)}" + } + if (stmt.isQuery) columnDecls = columnDecls :+ "id bigint" :+ "label boolean" + if (ss.useDeltaCount) columnDecls = columnDecls :+ "dd_count int" + val indentation = " " * stmt.a.name.length + val blockName = ss.resolveExtractorBlockName(stmt) + schemas += s""" + deepdive.extraction.extractors.${blockName} { + sql: \"\"\" DROP TABLE IF EXISTS ${stmt.a.name} CASCADE; + CREATE TABLE + ${stmt.a.name}(${columnDecls.mkString(",\n" + indentation)}) + \"\"\" + style: "sql_extractor" + }""" } - if (stmt.isQuery) columnDecls = columnDecls :+ "id bigint" :+ "label boolean" - if (ss.useDeltaCount) columnDecls = columnDecls :+ "dd_count int" - val indentation = " " * stmt.a.name.length - val blockName = ss.resolveExtractorBlockName(stmt) - schemas += s""" - deepdive.extraction.extractors.${blockName} { - sql: \"\"\" DROP TABLE IF EXISTS ${stmt.a.name} CASCADE; - CREATE TABLE - ${stmt.a.name}(${columnDecls.mkString(",\n" + indentation)}) - \"\"\" - style: "sql_extractor" - }""" } // Cleanup incremental table extractor - val truncateTableList = (stmts map (x => if (x.a.name.startsWith("dd_")) s"TRUNCATE ${x.a.name};" else "")).filter(_ != "") + val truncateTableList = (stmts map (x => if ((x.a.name startsWith "dd_new_") && (ss.inferenceRuleGroupByHead contains x.a.name)) "" else s"TRUNCATE ${x.a.name};")).filter(_ != "") if (truncateTableList.length > 0) { schemas += s""" deepdive.extraction.extractors.cleanup { @@ -369,7 +372,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val headTerms = tmpCq.head.terms map { case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" } - val headTermsStr = ( headTerms :+ "0 as id" ).mkString(", ") + val headTermsStr = ( headTerms :+ "id" ).mkString(", ") val ddCount = if (ss.useDeltaCount) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS 
dd_count" else "" inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, label ${ddCountStr} @@ -415,14 +418,14 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val sqlCmdForCleanUp = ss.mode match { case MERGE => s"TRUNCATE ${stmts(0).q.head.name};" case _ => if (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name) { - if (stmts(0).q.head.name.startsWith("dd_new_")) + if (stmts(0).q.head.name.startsWith("dd_new_") && !(ss.inferenceRuleGroupByHead contains stmts(0).q.head.name)) s"TRUNCATE ${stmts(0).q.head.name};" else "" } else s"DROP VIEW IF EXISTS ${stmts(0).q.head.name};" } val createTable = ss.mode match { case MERGE => true - case _ => if (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name) true else false + case _ => if ((!(stmts(0).q.head.name startsWith "dd_new_") && (ss.schemaDeclarationGroupByHead contains stmts(0).q.head.name)) || ((stmts(0).q.head.name startsWith "dd_new_") && !(ss.inferenceRuleGroupByHead contains stmts(0).q.head.name))) true else false } val sqlCmdForInsert = if (createTable) "INSERT INTO" else "CREATE VIEW" val useAS = if (createTable) "" else " AS" @@ -434,7 +437,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val extractor = s""" deepdive.extraction.extractors.${blockName} { sql: \"\"\" ${sqlCmdForCleanUp} - ${sqlCmdForInsert} ${stmts(0).q.head.name}${useAS} ${inputQueries.mkString(" UNION ")}${cleanUp} + ${sqlCmdForInsert} ${stmts(0).q.head.name}${useAS} ${inputQueries.mkString(" UNION ALL ")}${cleanUp} \"\"\" style: "sql_extractor" ${ss.generateDependenciesOfCompiledBlockFor(stmts)} @@ -536,7 +539,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val blockName = ss.resolveInferenceBlockName(stmt) blocks ::= s""" deepdive.inference.factors.${blockName} { - input_query: \"\"\"${inputQueries.mkString(" UNION ")}\"\"\" + input_query: \"\"\"${inputQueries.mkString(" UNION ALL ")}\"\"\" function: "${func}" weight: "${weight}" } @@ -599,7 +602,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { case _ => "" } val base_dir = ss.mode match { - case INCREMENTAL => "deepdive.pipeline.base_dir: ${BASEDIR}" + case MATERIALIZATION | INCREMENTAL => "deepdive.pipeline.base_dir: ${BASEDIR}" case _ => "" } List(run, initdb, extraction_pipeline, inference_pipeline, cleanup_pipeline, base_dir).filter(_ != "") diff --git a/test/expected-output-test/smoke_example/compile-incremental.expected b/test/expected-output-test/smoke_example/compile-incremental.expected index 475317479..08154a6ce 100644 --- a/test/expected-output-test/smoke_example/compile-incremental.expected +++ b/test/expected-output-test/smoke_example/compile-incremental.expected @@ -28,129 +28,118 @@ dd_delta_smoke.label: Boolean } - deepdive.extraction.extractors.extraction_rule_13 { - sql: """ DROP TABLE IF EXISTS dd_delta_friends CASCADE; - CREATE TABLE - dd_delta_friends(person_id bigint, + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP TABLE IF EXISTS dd_delta_friends CASCADE; + CREATE TABLE + dd_delta_friends(person_id bigint, friend_id bigint, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_17 { - sql: """ DROP TABLE IF EXISTS dd_delta_smoke CASCADE; - CREATE TABLE - dd_delta_smoke(person_id bigint, + deepdive.extraction.extractors.extraction_rule_17 { + sql: """ DROP TABLE IF EXISTS dd_delta_smoke CASCADE; + CREATE TABLE + dd_delta_smoke(person_id bigint, id bigint, label boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + 
style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_6 { - sql: """ DROP TABLE IF EXISTS dd_new_person_has_cancer CASCADE; - CREATE TABLE - dd_new_person_has_cancer(person_id bigint, + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ DROP TABLE IF EXISTS dd_new_person_has_cancer CASCADE; + CREATE TABLE + dd_new_person_has_cancer(person_id bigint, has_cancer boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_1 { - sql: """ DROP TABLE IF EXISTS dd_delta_person CASCADE; - CREATE TABLE - dd_delta_person(person_id bigint, + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS dd_delta_person CASCADE; + CREATE TABLE + dd_delta_person(person_id bigint, name text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_2 { - sql: """ DROP TABLE IF EXISTS dd_new_person CASCADE; - CREATE TABLE - dd_new_person(person_id bigint, + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS dd_new_person CASCADE; + CREATE TABLE + dd_new_person(person_id bigint, name text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_5 { - sql: """ DROP TABLE IF EXISTS dd_delta_person_has_cancer CASCADE; - CREATE TABLE - dd_delta_person_has_cancer(person_id bigint, + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS dd_delta_person_has_cancer CASCADE; + CREATE TABLE + dd_delta_person_has_cancer(person_id bigint, has_cancer boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_18 { - sql: """ DROP TABLE IF EXISTS dd_new_smoke CASCADE; - CREATE TABLE - dd_new_smoke(person_id bigint, + deepdive.extraction.extractors.extraction_rule_18 { + sql: """ DROP TABLE IF EXISTS dd_new_smoke CASCADE; + CREATE TABLE + dd_new_smoke(person_id bigint, id bigint, label boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_21 { - sql: """ DROP TABLE IF EXISTS dd_delta_cancer CASCADE; - CREATE TABLE - dd_delta_cancer(person_id bigint, + deepdive.extraction.extractors.extraction_rule_21 { + sql: """ DROP TABLE IF EXISTS dd_delta_cancer CASCADE; + CREATE TABLE + dd_delta_cancer(person_id bigint, id bigint, label boolean, dd_count int) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_22 { - sql: """ DROP TABLE IF EXISTS dd_new_cancer CASCADE; - CREATE TABLE - dd_new_cancer(person_id bigint, - id bigint, - label boolean, - dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_9 { - sql: """ DROP TABLE IF EXISTS dd_delta_person_smokes CASCADE; - CREATE TABLE - dd_delta_person_smokes(person_id bigint, + deepdive.extraction.extractors.extraction_rule_9 { + sql: """ DROP TABLE IF EXISTS dd_delta_person_smokes CASCADE; + CREATE TABLE + dd_delta_person_smokes(person_id bigint, smokes boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_10 { - sql: """ DROP TABLE IF EXISTS dd_new_person_smokes CASCADE; - CREATE TABLE - dd_new_person_smokes(person_id bigint, + 
deepdive.extraction.extractors.extraction_rule_10 { + sql: """ DROP TABLE IF EXISTS dd_new_person_smokes CASCADE; + CREATE TABLE + dd_new_person_smokes(person_id bigint, smokes boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_14 { - sql: """ DROP TABLE IF EXISTS dd_new_friends CASCADE; - CREATE TABLE - dd_new_friends(person_id bigint, + deepdive.extraction.extractors.extraction_rule_14 { + sql: """ DROP TABLE IF EXISTS dd_new_friends CASCADE; + CREATE TABLE + dd_new_friends(person_id bigint, friend_id bigint, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } deepdive.extraction.extractors.cleanup { sql: """ @@ -162,7 +151,6 @@ dd_delta_smoke.label: Boolean TRUNCATE dd_delta_person_has_cancer; TRUNCATE dd_new_smoke; TRUNCATE dd_delta_cancer; - TRUNCATE dd_new_cancer; TRUNCATE dd_delta_person_smokes; TRUNCATE dd_new_person_smokes; TRUNCATE dd_new_friends; @@ -187,7 +175,7 @@ dd_delta_smoke.label: Boolean INSERT INTO dd_new_person_has_cancer SELECT R0.person_id, R0.has_cancer, R0.dd_count FROM person_has_cancer R0 - UNION + UNION ALL SELECT R0.person_id, R0.has_cancer, R0.dd_count FROM dd_delta_person_has_cancer R0 @@ -202,7 +190,7 @@ dd_delta_smoke.label: Boolean INSERT INTO dd_new_person SELECT R0.person_id, R0.name, R0.dd_count FROM person R0 - UNION + UNION ALL SELECT R0.person_id, R0.name, R0.dd_count FROM dd_delta_person R0 @@ -214,10 +202,10 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_19 { sql: """ TRUNCATE dd_new_smoke; - INSERT INTO dd_new_smoke SELECT DISTINCT R0.person_id, 0 as id, label , R0.dd_count AS dd_count + INSERT INTO dd_new_smoke SELECT DISTINCT R0.person_id, id, label , R0.dd_count AS dd_count FROM smoke R0 - UNION SELECT DISTINCT R0.person_id, 0 as id, label , R0.dd_count AS dd_count + UNION ALL SELECT DISTINCT R0.person_id, id, label , R0.dd_count AS dd_count FROM dd_delta_smoke R0 @@ -240,11 +228,11 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_23 { - sql: """ TRUNCATE dd_new_cancer; - INSERT INTO dd_new_cancer SELECT DISTINCT R0.person_id, 0 as id, label , R0.dd_count AS dd_count + sql: """ + CREATE VIEW dd_new_cancer AS SELECT DISTINCT R0.person_id, id, label , R0.dd_count AS dd_count FROM cancer R0 - UNION SELECT DISTINCT R0.person_id, 0 as id, label , R0.dd_count AS dd_count + UNION ALL SELECT DISTINCT R0.person_id, id, label , R0.dd_count AS dd_count FROM dd_delta_cancer R0 @@ -259,7 +247,7 @@ dd_delta_smoke.label: Boolean INSERT INTO dd_new_person_smokes SELECT R0.person_id, R0.smokes, R0.dd_count FROM person_smokes R0 - UNION + UNION ALL SELECT R0.person_id, R0.smokes, R0.dd_count FROM dd_delta_person_smokes R0 @@ -274,7 +262,7 @@ dd_delta_smoke.label: Boolean INSERT INTO dd_new_friends SELECT R0.person_id, R0.friend_id, R0.dd_count FROM friends R0 - UNION + UNION ALL SELECT R0.person_id, R0.friend_id, R0.dd_count FROM dd_delta_friends R0 @@ -288,7 +276,7 @@ dd_delta_smoke.label: Boolean input_query: """ SELECT R0.id AS "dd_new_cancer.R0.id" , R1.id AS "dd_delta_smoke.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_new_cancer R0, dd_delta_smoke R1, person_smokes R2 - WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id UNION + WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id UNION ALL SELECT R0.id AS "dd_new_cancer.R0.id" , R1.id AS "dd_new_smoke.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM 
dd_new_cancer R0, dd_new_smoke R1, dd_delta_person_smokes R2 WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ diff --git a/test/expected-output-test/smoke_example/compile-materialization.expected b/test/expected-output-test/smoke_example/compile-materialization.expected index 6296be46d..1c050a73a 100644 --- a/test/expected-output-test/smoke_example/compile-materialization.expected +++ b/test/expected-output-test/smoke_example/compile-materialization.expected @@ -18,64 +18,76 @@ cancer.label: Boolean } - deepdive.extraction.extractors.extraction_rule_2 { - sql: """ DROP TABLE IF EXISTS person_smokes CASCADE; - CREATE TABLE - person_smokes(person_id bigint, + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS person_smokes CASCADE; + CREATE TABLE + person_smokes(person_id bigint, smokes boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_4 { - sql: """ DROP TABLE IF EXISTS smoke CASCADE; - CREATE TABLE - smoke(person_id bigint, + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS smoke CASCADE; + CREATE TABLE + smoke(person_id bigint, id bigint, label boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_5 { - sql: """ DROP TABLE IF EXISTS cancer CASCADE; - CREATE TABLE - cancer(person_id bigint, + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS cancer CASCADE; + CREATE TABLE + cancer(person_id bigint, id bigint, label boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_3 { - sql: """ DROP TABLE IF EXISTS friends CASCADE; - CREATE TABLE - friends(person_id bigint, + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS friends CASCADE; + CREATE TABLE + friends(person_id bigint, friend_id bigint, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_1 { - sql: """ DROP TABLE IF EXISTS person_has_cancer CASCADE; - CREATE TABLE - person_has_cancer(person_id bigint, + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS person_has_cancer CASCADE; + CREATE TABLE + person_has_cancer(person_id bigint, has_cancer boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_0 { - sql: """ DROP TABLE IF EXISTS person CASCADE; - CREATE TABLE - person(person_id bigint, + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS person CASCADE; + CREATE TABLE + person(person_id bigint, name text, dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE person_smokes; + TRUNCATE smoke; + TRUNCATE cancer; + TRUNCATE friends; + TRUNCATE person_has_cancer; + TRUNCATE person; """ style: "sql_extractor" } @@ -117,3 +129,4 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_2, extraction_rule_4, extraction_rule_5, extraction_rule_3, extraction_rule_1, extraction_rule_0] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_6] deepdive.pipeline.pipelines.inference: [cancer_0] +deepdive.pipeline.base_dir: ${BASEDIR} diff --git 
a/test/expected-output-test/smoke_example/compile.expected b/test/expected-output-test/smoke_example/compile.expected index 16c907851..c5147d90d 100644 --- a/test/expected-output-test/smoke_example/compile.expected +++ b/test/expected-output-test/smoke_example/compile.expected @@ -18,58 +18,70 @@ cancer.label: Boolean } - deepdive.extraction.extractors.extraction_rule_2 { - sql: """ DROP TABLE IF EXISTS person_smokes CASCADE; - CREATE TABLE - person_smokes(person_id bigint, + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS person_smokes CASCADE; + CREATE TABLE + person_smokes(person_id bigint, smokes boolean) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_4 { - sql: """ DROP TABLE IF EXISTS smoke CASCADE; - CREATE TABLE - smoke(person_id bigint, + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS smoke CASCADE; + CREATE TABLE + smoke(person_id bigint, id bigint, label boolean) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_5 { - sql: """ DROP TABLE IF EXISTS cancer CASCADE; - CREATE TABLE - cancer(person_id bigint, + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS cancer CASCADE; + CREATE TABLE + cancer(person_id bigint, id bigint, label boolean) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_3 { - sql: """ DROP TABLE IF EXISTS friends CASCADE; - CREATE TABLE - friends(person_id bigint, + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS friends CASCADE; + CREATE TABLE + friends(person_id bigint, friend_id bigint) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_1 { - sql: """ DROP TABLE IF EXISTS person_has_cancer CASCADE; - CREATE TABLE - person_has_cancer(person_id bigint, + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS person_has_cancer CASCADE; + CREATE TABLE + person_has_cancer(person_id bigint, has_cancer boolean) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_0 { - sql: """ DROP TABLE IF EXISTS person CASCADE; - CREATE TABLE - person(person_id bigint, + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS person CASCADE; + CREATE TABLE + person(person_id bigint, name text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE person_smokes; + TRUNCATE smoke; + TRUNCATE cancer; + TRUNCATE friends; + TRUNCATE person_has_cancer; + TRUNCATE person; """ style: "sql_extractor" } diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected index 439b69a1b..bcbd12095 100644 --- a/test/expected-output-test/spouse_example/compile-incremental.expected +++ b/test/expected-output-test/spouse_example/compile-incremental.expected @@ -23,77 +23,77 @@ dd_new_has_spouse.label: Boolean } - deepdive.extraction.extractors.extraction_rule_1 { - sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; - CREATE TABLE - dd_delta_articles(article_id text, + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; + CREATE TABLE + dd_delta_articles(article_id text, 
text text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_10 { - sql: """ DROP TABLE IF EXISTS dd_new_people_mentions CASCADE; - CREATE TABLE - dd_new_people_mentions(sentence_id text, + deepdive.extraction.extractors.extraction_rule_10 { + sql: """ DROP TABLE IF EXISTS dd_new_people_mentions CASCADE; + CREATE TABLE + dd_new_people_mentions(sentence_id text, start_position int, length int, text text, mention_id text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_17 { - sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; - CREATE TABLE - dd_delta_has_spouse_features(relation_id text, + deepdive.extraction.extractors.extraction_rule_17 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; + CREATE TABLE + dd_delta_has_spouse_features(relation_id text, feature text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_2 { - sql: """ DROP TABLE IF EXISTS dd_new_articles CASCADE; - CREATE TABLE - dd_new_articles(article_id text, + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS dd_new_articles CASCADE; + CREATE TABLE + dd_new_articles(article_id text, text text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_14 { - sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_candidates CASCADE; - CREATE TABLE - dd_new_has_spouse_candidates(person1_id text, + deepdive.extraction.extractors.extraction_rule_14 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_candidates CASCADE; + CREATE TABLE + dd_new_has_spouse_candidates(person1_id text, person2_id text, sentence_id text, description text, relation_id text, is_true boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_18 { - sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_features CASCADE; - CREATE TABLE - dd_new_has_spouse_features(relation_id text, + deepdive.extraction.extractors.extraction_rule_18 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_features CASCADE; + CREATE TABLE + dd_new_has_spouse_features(relation_id text, feature text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_6 { - sql: """ DROP TABLE IF EXISTS dd_new_sentences CASCADE; - CREATE TABLE - dd_new_sentences(document_id text, + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ DROP TABLE IF EXISTS dd_new_sentences CASCADE; + CREATE TABLE + dd_new_sentences(document_id text, sentence text, words text, lemma text, @@ -103,41 +103,41 @@ dd_new_has_spouse.label: Boolean sentence_offset int, sentence_id text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_9 { - sql: """ DROP TABLE IF EXISTS dd_delta_people_mentions CASCADE; - CREATE TABLE - dd_delta_people_mentions(sentence_id text, + deepdive.extraction.extractors.extraction_rule_9 { + sql: """ DROP TABLE IF EXISTS dd_delta_people_mentions CASCADE; + CREATE TABLE + dd_delta_people_mentions(sentence_id text, start_position int, length int, text text, mention_id text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - 
deepdive.extraction.extractors.extraction_rule_13 { - sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_candidates CASCADE; - CREATE TABLE - dd_delta_has_spouse_candidates(person1_id text, + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_candidates CASCADE; + CREATE TABLE + dd_delta_has_spouse_candidates(person1_id text, person2_id text, sentence_id text, description text, relation_id text, is_true boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_5 { - sql: """ DROP TABLE IF EXISTS dd_delta_sentences CASCADE; - CREATE TABLE - dd_delta_sentences(document_id text, + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS dd_delta_sentences CASCADE; + CREATE TABLE + dd_delta_sentences(document_id text, sentence text, words text, lemma text, @@ -147,31 +147,20 @@ dd_new_has_spouse.label: Boolean sentence_offset int, sentence_id text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_21 { - sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE; - CREATE TABLE - dd_delta_has_spouse(relation_id text, + deepdive.extraction.extractors.extraction_rule_21 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE; + CREATE TABLE + dd_delta_has_spouse(relation_id text, id bigint, label boolean, dd_count int) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_22 { - sql: """ DROP TABLE IF EXISTS dd_new_has_spouse CASCADE; - CREATE TABLE - dd_new_has_spouse(relation_id text, - id bigint, - label boolean, - dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } deepdive.extraction.extractors.cleanup { sql: """ @@ -186,7 +175,6 @@ dd_new_has_spouse.label: Boolean TRUNCATE dd_delta_has_spouse_candidates; TRUNCATE dd_delta_sentences; TRUNCATE dd_delta_has_spouse; - TRUNCATE dd_new_has_spouse; """ style: "sql_extractor" } @@ -196,13 +184,13 @@ dd_new_has_spouse.label: Boolean CREATE VIEW dd_delta_ext_has_spouse_features_input AS SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id 
AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id @@ -217,7 +205,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_people_mentions SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count FROM people_mentions R0 - UNION + UNION ALL SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count FROM dd_delta_people_mentions R0 @@ -232,7 +220,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_articles SELECT R0.article_id, R0.text, R0.dd_count FROM articles R0 - UNION + UNION ALL SELECT R0.article_id, R0.text, R0.dd_count FROM dd_delta_articles R0 @@ -259,7 +247,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_has_spouse_candidates SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count FROM has_spouse_candidates R0 - UNION + UNION ALL SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count FROM dd_delta_has_spouse_candidates R0 @@ -274,7 +262,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_has_spouse_features SELECT R0.relation_id, R0.feature, R0.dd_count FROM has_spouse_features R0 - UNION + UNION ALL SELECT R0.relation_id, R0.feature, R0.dd_count FROM dd_delta_has_spouse_features R0 @@ -289,7 +277,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_sentences SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count FROM sentences R0 - UNION + UNION ALL SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, 
R0.sentence_offset, R0.sentence_id, R0.dd_count FROM dd_delta_sentences R0 @@ -304,7 +292,7 @@ dd_new_has_spouse.label: Boolean CREATE VIEW dd_delta_ext_has_spouse_input AS SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" FROM dd_delta_people_mentions R0, people_mentions R1 - WHERE R1.sentence_id = R0.sentence_id UNION + WHERE R1.sentence_id = R0.sentence_id UNION ALL SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 WHERE R1.sentence_id = R0.sentence_id @@ -327,11 +315,11 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_23 { - sql: """ TRUNCATE dd_new_has_spouse; - INSERT INTO dd_new_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, label , R0.dd_count AS dd_count + sql: """ + CREATE VIEW dd_new_has_spouse AS SELECT DISTINCT R0.relation_id, id, label , R0.dd_count AS dd_count FROM has_spouse R0 - UNION SELECT DISTINCT R0.relation_id, 0 as id, label , R0.dd_count AS dd_count + UNION ALL SELECT DISTINCT R0.relation_id, id, label , R0.dd_count AS dd_count FROM dd_delta_has_spouse R0 @@ -375,7 +363,7 @@ dd_new_has_spouse.label: Boolean input_query: """ SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_new_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION ALL SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_new_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ diff --git a/test/expected-output-test/spouse_example/compile-materialization.expected b/test/expected-output-test/spouse_example/compile-materialization.expected index 7f90239da..b3e0309c9 100644 --- a/test/expected-output-test/spouse_example/compile-materialization.expected +++ b/test/expected-output-test/spouse_example/compile-materialization.expected @@ -17,10 +17,10 @@ } - deepdive.extraction.extractors.extraction_rule_1 { - sql: """ DROP TABLE IF EXISTS sentences CASCADE; - CREATE TABLE - sentences(document_id text, + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS sentences CASCADE; + CREATE TABLE + sentences(document_id text, sentence text, words text, lemma text, @@ -30,64 +30,76 @@ sentence_offset int, sentence_id text, dd_count int) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_3 { - sql: """ DROP TABLE IF EXISTS has_spouse_candidates CASCADE; - CREATE TABLE - has_spouse_candidates(person1_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS 
has_spouse_candidates CASCADE; + CREATE TABLE + has_spouse_candidates(person1_id text, person2_id text, sentence_id text, description text, relation_id text, is_true boolean, dd_count int) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_5 { - sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; - CREATE TABLE - has_spouse(relation_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; + CREATE TABLE + has_spouse(relation_id text, id bigint, label boolean, dd_count int) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_0 { - sql: """ DROP TABLE IF EXISTS articles CASCADE; - CREATE TABLE - articles(article_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS articles CASCADE; + CREATE TABLE + articles(article_id text, text text, dd_count int) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_4 { - sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE; - CREATE TABLE - has_spouse_features(relation_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE; + CREATE TABLE + has_spouse_features(relation_id text, feature text, dd_count int) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_2 { - sql: """ DROP TABLE IF EXISTS people_mentions CASCADE; - CREATE TABLE - people_mentions(sentence_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS people_mentions CASCADE; + CREATE TABLE + people_mentions(sentence_id text, start_position int, length int, text text, mention_id text, dd_count int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE sentences; + TRUNCATE has_spouse_candidates; + TRUNCATE has_spouse; + TRUNCATE articles; + TRUNCATE has_spouse_features; + TRUNCATE people_mentions; """ style: "sql_extractor" } @@ -183,3 +195,4 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_3, extraction_rule_5, extraction_rule_0, extraction_rule_4, extraction_rule_2] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_15, extraction_rule_6] deepdive.pipeline.pipelines.inference: [has_spouse_0] +deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/spouse_example/compile.expected b/test/expected-output-test/spouse_example/compile.expected index 0a290eff3..ee490fafe 100644 --- a/test/expected-output-test/spouse_example/compile.expected +++ b/test/expected-output-test/spouse_example/compile.expected @@ -17,10 +17,10 @@ } - deepdive.extraction.extractors.extraction_rule_1 { - sql: """ DROP TABLE IF EXISTS sentences CASCADE; - CREATE TABLE - sentences(document_id text, + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS sentences CASCADE; + CREATE TABLE + sentences(document_id text, sentence text, words text, lemma text, @@ -29,59 +29,71 @@ ner_tags text, sentence_offset int, sentence_id text) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_3 { - sql: """ DROP TABLE IF EXISTS has_spouse_candidates CASCADE; - CREATE 
TABLE - has_spouse_candidates(person1_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS has_spouse_candidates CASCADE; + CREATE TABLE + has_spouse_candidates(person1_id text, person2_id text, sentence_id text, description text, relation_id text, is_true boolean) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_5 { - sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; - CREATE TABLE - has_spouse(relation_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; + CREATE TABLE + has_spouse(relation_id text, id bigint, label boolean) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_0 { - sql: """ DROP TABLE IF EXISTS articles CASCADE; - CREATE TABLE - articles(article_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS articles CASCADE; + CREATE TABLE + articles(article_id text, text text) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_4 { - sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE; - CREATE TABLE - has_spouse_features(relation_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE; + CREATE TABLE + has_spouse_features(relation_id text, feature text) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_2 { - sql: """ DROP TABLE IF EXISTS people_mentions CASCADE; - CREATE TABLE - people_mentions(sentence_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS people_mentions CASCADE; + CREATE TABLE + people_mentions(sentence_id text, start_position int, length int, text text, mention_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE sentences; + TRUNCATE has_spouse_candidates; + TRUNCATE has_spouse; + TRUNCATE articles; + TRUNCATE has_spouse_features; + TRUNCATE people_mentions; """ style: "sql_extractor" } diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index 52adc16f4..028b2ae9a 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -23,77 +23,77 @@ dd_new_has_spouse.label: Boolean } - deepdive.extraction.extractors.extraction_rule_1 { - sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; - CREATE TABLE - dd_delta_articles(article_id text, + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; + CREATE TABLE + dd_delta_articles(article_id text, text text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_10 { - sql: """ DROP TABLE IF EXISTS dd_new_people_mentions CASCADE; - CREATE TABLE - dd_new_people_mentions(sentence_id text, + deepdive.extraction.extractors.extraction_rule_10 { + sql: """ DROP TABLE IF EXISTS dd_new_people_mentions CASCADE; + CREATE TABLE + dd_new_people_mentions(sentence_id text, start_position int, length int, text text, 
mention_id text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_17 { - sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; - CREATE TABLE - dd_delta_has_spouse_features(relation_id text, + deepdive.extraction.extractors.extraction_rule_17 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; + CREATE TABLE + dd_delta_has_spouse_features(relation_id text, feature text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_2 { - sql: """ DROP TABLE IF EXISTS dd_new_articles CASCADE; - CREATE TABLE - dd_new_articles(article_id text, + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS dd_new_articles CASCADE; + CREATE TABLE + dd_new_articles(article_id text, text text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_14 { - sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_candidates CASCADE; - CREATE TABLE - dd_new_has_spouse_candidates(person1_id text, + deepdive.extraction.extractors.extraction_rule_14 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_candidates CASCADE; + CREATE TABLE + dd_new_has_spouse_candidates(person1_id text, person2_id text, sentence_id text, description text, relation_id text, is_true boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_18 { - sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_features CASCADE; - CREATE TABLE - dd_new_has_spouse_features(relation_id text, + deepdive.extraction.extractors.extraction_rule_18 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_features CASCADE; + CREATE TABLE + dd_new_has_spouse_features(relation_id text, feature text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_6 { - sql: """ DROP TABLE IF EXISTS dd_new_sentences CASCADE; - CREATE TABLE - dd_new_sentences(document_id text, + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ DROP TABLE IF EXISTS dd_new_sentences CASCADE; + CREATE TABLE + dd_new_sentences(document_id text, sentence text, words text, lemma text, @@ -103,41 +103,41 @@ dd_new_has_spouse.label: Boolean sentence_offset int, sentence_id text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_9 { - sql: """ DROP TABLE IF EXISTS dd_delta_people_mentions CASCADE; - CREATE TABLE - dd_delta_people_mentions(sentence_id text, + deepdive.extraction.extractors.extraction_rule_9 { + sql: """ DROP TABLE IF EXISTS dd_delta_people_mentions CASCADE; + CREATE TABLE + dd_delta_people_mentions(sentence_id text, start_position int, length int, text text, mention_id text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_13 { - sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_candidates CASCADE; - CREATE TABLE - dd_delta_has_spouse_candidates(person1_id text, + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_candidates CASCADE; + CREATE TABLE + dd_delta_has_spouse_candidates(person1_id text, person2_id text, sentence_id text, description text, relation_id text, is_true boolean, dd_count int) - """ - style: 
"sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_5 { - sql: """ DROP TABLE IF EXISTS dd_delta_sentences CASCADE; - CREATE TABLE - dd_delta_sentences(document_id text, + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS dd_delta_sentences CASCADE; + CREATE TABLE + dd_delta_sentences(document_id text, sentence text, words text, lemma text, @@ -147,31 +147,20 @@ dd_new_has_spouse.label: Boolean sentence_offset int, sentence_id text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_21 { - sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE; - CREATE TABLE - dd_delta_has_spouse(relation_id text, + deepdive.extraction.extractors.extraction_rule_21 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE; + CREATE TABLE + dd_delta_has_spouse(relation_id text, id bigint, label boolean, dd_count int) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_22 { - sql: """ DROP TABLE IF EXISTS dd_new_has_spouse CASCADE; - CREATE TABLE - dd_new_has_spouse(relation_id text, - id bigint, - label boolean, - dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } deepdive.extraction.extractors.cleanup { sql: """ @@ -186,7 +175,6 @@ dd_new_has_spouse.label: Boolean TRUNCATE dd_delta_has_spouse_candidates; TRUNCATE dd_delta_sentences; TRUNCATE dd_delta_has_spouse; - TRUNCATE dd_new_has_spouse; """ style: "sql_extractor" } @@ -196,13 +184,13 @@ dd_new_has_spouse.label: Boolean CREATE VIEW dd_delta_ext_has_spouse_features_input AS SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS 
"dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id @@ -217,7 +205,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_people_mentions SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count FROM people_mentions R0 - UNION + UNION ALL SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count FROM dd_delta_people_mentions R0 @@ -232,7 +220,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_articles SELECT R0.article_id, R0.text, R0.dd_count FROM articles R0 - UNION + UNION ALL SELECT R0.article_id, R0.text, R0.dd_count FROM dd_delta_articles R0 @@ -247,7 +235,7 @@ dd_new_has_spouse.label: Boolean CREATE VIEW dd_delta_ext_people_input AS SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" , R0.dd_count AS "dd_count" FROM sentences R0 - UNION + UNION ALL SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" FROM dd_delta_sentences R0 @@ -262,7 +250,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_has_spouse_candidates SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count FROM has_spouse_candidates R0 - UNION + UNION ALL SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count FROM dd_delta_has_spouse_candidates R0 @@ -277,7 +265,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_has_spouse_features SELECT R0.relation_id, R0.feature, R0.dd_count FROM has_spouse_features R0 - UNION + UNION ALL SELECT R0.relation_id, R0.feature, R0.dd_count FROM dd_delta_has_spouse_features R0 @@ -292,7 +280,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_sentences SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count FROM sentences R0 - UNION + UNION ALL SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, 
R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count FROM dd_delta_sentences R0 @@ -307,7 +295,7 @@ dd_new_has_spouse.label: Boolean CREATE VIEW dd_delta_ext_has_spouse_input AS SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" FROM dd_delta_people_mentions R0, people_mentions R1 - WHERE R1.sentence_id = R0.sentence_id UNION + WHERE R1.sentence_id = R0.sentence_id UNION ALL SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 WHERE R1.sentence_id = R0.sentence_id @@ -330,11 +318,11 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_23 { - sql: """ TRUNCATE dd_new_has_spouse; - INSERT INTO dd_new_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, label , R0.dd_count AS dd_count + sql: """ + CREATE VIEW dd_new_has_spouse AS SELECT DISTINCT R0.relation_id, id, label , R0.dd_count AS dd_count FROM has_spouse R0 - UNION SELECT DISTINCT R0.relation_id, 0 as id, label , R0.dd_count AS dd_count + UNION ALL SELECT DISTINCT R0.relation_id, id, label , R0.dd_count AS dd_count FROM dd_delta_has_spouse R0 @@ -378,7 +366,7 @@ dd_new_has_spouse.label: Boolean input_query: """ SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_new_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION ALL SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_new_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ diff --git a/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected b/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected index c3391a893..d6ed9c2e6 100644 --- a/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected @@ -23,77 +23,77 @@ dd_new_has_spouse.label: Boolean } - deepdive.extraction.extractors.extraction_rule_1 { - sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; - CREATE TABLE - dd_delta_articles(article_id text, + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; + CREATE TABLE + dd_delta_articles(article_id text, text text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_10 { - sql: """ DROP TABLE IF EXISTS dd_new_people_mentions CASCADE; - CREATE TABLE - dd_new_people_mentions(sentence_id text, + deepdive.extraction.extractors.extraction_rule_10 { + 
sql: """ DROP TABLE IF EXISTS dd_new_people_mentions CASCADE; + CREATE TABLE + dd_new_people_mentions(sentence_id text, start_position int, length int, text text, mention_id text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_17 { - sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; - CREATE TABLE - dd_delta_has_spouse_features(relation_id text, + deepdive.extraction.extractors.extraction_rule_17 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; + CREATE TABLE + dd_delta_has_spouse_features(relation_id text, feature text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_2 { - sql: """ DROP TABLE IF EXISTS dd_new_articles CASCADE; - CREATE TABLE - dd_new_articles(article_id text, + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS dd_new_articles CASCADE; + CREATE TABLE + dd_new_articles(article_id text, text text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_14 { - sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_candidates CASCADE; - CREATE TABLE - dd_new_has_spouse_candidates(person1_id text, + deepdive.extraction.extractors.extraction_rule_14 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_candidates CASCADE; + CREATE TABLE + dd_new_has_spouse_candidates(person1_id text, person2_id text, sentence_id text, description text, relation_id text, is_true boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_18 { - sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_features CASCADE; - CREATE TABLE - dd_new_has_spouse_features(relation_id text, + deepdive.extraction.extractors.extraction_rule_18 { + sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_features CASCADE; + CREATE TABLE + dd_new_has_spouse_features(relation_id text, feature text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_6 { - sql: """ DROP TABLE IF EXISTS dd_new_sentences CASCADE; - CREATE TABLE - dd_new_sentences(document_id text, + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ DROP TABLE IF EXISTS dd_new_sentences CASCADE; + CREATE TABLE + dd_new_sentences(document_id text, sentence text, words text, lemma text, @@ -103,41 +103,41 @@ dd_new_has_spouse.label: Boolean sentence_offset int, sentence_id text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_9 { - sql: """ DROP TABLE IF EXISTS dd_delta_people_mentions CASCADE; - CREATE TABLE - dd_delta_people_mentions(sentence_id text, + deepdive.extraction.extractors.extraction_rule_9 { + sql: """ DROP TABLE IF EXISTS dd_delta_people_mentions CASCADE; + CREATE TABLE + dd_delta_people_mentions(sentence_id text, start_position int, length int, text text, mention_id text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_13 { - sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_candidates CASCADE; - CREATE TABLE - dd_delta_has_spouse_candidates(person1_id text, + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_candidates CASCADE; + CREATE TABLE + 
dd_delta_has_spouse_candidates(person1_id text, person2_id text, sentence_id text, description text, relation_id text, is_true boolean, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_5 { - sql: """ DROP TABLE IF EXISTS dd_delta_sentences CASCADE; - CREATE TABLE - dd_delta_sentences(document_id text, + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS dd_delta_sentences CASCADE; + CREATE TABLE + dd_delta_sentences(document_id text, sentence text, words text, lemma text, @@ -147,31 +147,20 @@ dd_new_has_spouse.label: Boolean sentence_offset int, sentence_id text, dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } - deepdive.extraction.extractors.extraction_rule_21 { - sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE; - CREATE TABLE - dd_delta_has_spouse(relation_id text, + deepdive.extraction.extractors.extraction_rule_21 { + sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse CASCADE; + CREATE TABLE + dd_delta_has_spouse(relation_id text, id bigint, label boolean, dd_count int) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_22 { - sql: """ DROP TABLE IF EXISTS dd_new_has_spouse CASCADE; - CREATE TABLE - dd_new_has_spouse(relation_id text, - id bigint, - label boolean, - dd_count int) - """ - style: "sql_extractor" - } + """ + style: "sql_extractor" + } deepdive.extraction.extractors.cleanup { sql: """ @@ -186,7 +175,6 @@ dd_new_has_spouse.label: Boolean TRUNCATE dd_delta_has_spouse_candidates; TRUNCATE dd_delta_sentences; TRUNCATE dd_delta_has_spouse; - TRUNCATE dd_new_has_spouse; """ style: "sql_extractor" } @@ -196,13 +184,13 @@ dd_new_has_spouse.label: Boolean CREATE VIEW dd_delta_ext_has_spouse_features_input AS SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION 
ALL SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id @@ -217,7 +205,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_people_mentions SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count FROM people_mentions R0 - UNION + UNION ALL SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count FROM dd_delta_people_mentions R0 @@ -232,7 +220,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_articles SELECT R0.article_id, R0.text, R0.dd_count FROM articles R0 - UNION + UNION ALL SELECT R0.article_id, R0.text, R0.dd_count FROM dd_delta_articles R0 @@ -259,7 +247,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_has_spouse_candidates SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count FROM has_spouse_candidates R0 - UNION + UNION ALL SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count FROM dd_delta_has_spouse_candidates R0 @@ -274,7 +262,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_has_spouse_features SELECT R0.relation_id, R0.feature, R0.dd_count FROM has_spouse_features R0 - UNION + UNION ALL SELECT R0.relation_id, R0.feature, R0.dd_count FROM dd_delta_has_spouse_features R0 @@ -289,7 +277,7 @@ dd_new_has_spouse.label: Boolean INSERT INTO dd_new_sentences SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count FROM sentences R0 - UNION + UNION ALL SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count FROM dd_delta_sentences R0 @@ -304,7 +292,7 @@ dd_new_has_spouse.label: Boolean CREATE VIEW dd_delta_ext_has_spouse_input AS SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS 
"dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" FROM dd_delta_people_mentions R0, people_mentions R1 - WHERE R1.sentence_id = R0.sentence_id UNION + WHERE R1.sentence_id = R0.sentence_id UNION ALL SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 WHERE R1.sentence_id = R0.sentence_id @@ -327,11 +315,11 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_23 { - sql: """ TRUNCATE dd_new_has_spouse; - INSERT INTO dd_new_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, label , R0.dd_count AS dd_count + sql: """ + CREATE VIEW dd_new_has_spouse AS SELECT DISTINCT R0.relation_id, id, label , R0.dd_count AS dd_count FROM has_spouse R0 - UNION SELECT DISTINCT R0.relation_id, 0 as id, label , R0.dd_count AS dd_count + UNION ALL SELECT DISTINCT R0.relation_id, id, label , R0.dd_count AS dd_count FROM dd_delta_has_spouse R0 @@ -375,7 +363,7 @@ dd_new_has_spouse.label: Boolean input_query: """ SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_new_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION ALL SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" FROM dd_new_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ diff --git a/test/expected-output-test/spouse_example_new_inference/compile.expected b/test/expected-output-test/spouse_example_new_inference/compile.expected index 981f3cee2..d516477b9 100644 --- a/test/expected-output-test/spouse_example_new_inference/compile.expected +++ b/test/expected-output-test/spouse_example_new_inference/compile.expected @@ -17,10 +17,10 @@ } - deepdive.extraction.extractors.extraction_rule_1 { - sql: """ DROP TABLE IF EXISTS sentences CASCADE; - CREATE TABLE - sentences(document_id text, + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS sentences CASCADE; + CREATE TABLE + sentences(document_id text, sentence text, words text, lemma text, @@ -29,59 +29,71 @@ ner_tags text, sentence_offset int, sentence_id text) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_3 { - sql: """ DROP TABLE IF EXISTS has_spouse_candidates CASCADE; - CREATE TABLE - has_spouse_candidates(person1_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS has_spouse_candidates CASCADE; + CREATE TABLE + has_spouse_candidates(person1_id text, person2_id text, sentence_id text, description text, relation_id text, is_true boolean) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_5 { - sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; - CREATE TABLE - 
has_spouse(relation_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS has_spouse CASCADE; + CREATE TABLE + has_spouse(relation_id text, id bigint, label boolean) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_0 { - sql: """ DROP TABLE IF EXISTS articles CASCADE; - CREATE TABLE - articles(article_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS articles CASCADE; + CREATE TABLE + articles(article_id text, text text) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_4 { - sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE; - CREATE TABLE - has_spouse_features(relation_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE; + CREATE TABLE + has_spouse_features(relation_id text, feature text) - """ - style: "sql_extractor" - } - - deepdive.extraction.extractors.extraction_rule_2 { - sql: """ DROP TABLE IF EXISTS people_mentions CASCADE; - CREATE TABLE - people_mentions(sentence_id text, + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS people_mentions CASCADE; + CREATE TABLE + people_mentions(sentence_id text, start_position int, length int, text text, mention_id text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE sentences; + TRUNCATE has_spouse_candidates; + TRUNCATE has_spouse; + TRUNCATE articles; + TRUNCATE has_spouse_features; + TRUNCATE people_mentions; """ style: "sql_extractor" } From 6dab902da9dab5e7044ca420502caba31300dcff Mon Sep 17 00:00:00 2001 From: senwu Date: Fri, 12 Jun 2015 17:57:27 -0700 Subject: [PATCH 134/347] get rid of dd_count --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 48 ++--------- .../compile-incremental.expected | 65 +++++++-------- .../compile-materialization.expected | 24 +++--- .../smoke_example/compile-merge.expected | 20 ++--- .../compile-incremental.expected | 77 ++++++++---------- .../compile-materialization.expected | 28 +++---- .../spouse_example/compile-merge.expected | 25 +++--- .../spouse_example/compile.expected | 4 +- .../compile-incremental.expected | 79 ++++++++----------- .../compile-incremental.expected | 79 ++++++++----------- .../compile.expected | 6 +- 11 files changed, 177 insertions(+), 278 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index e2b1dc3e7..289c1bfbe 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -98,8 +98,6 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C var mode : Mode = ORIGINAL - var useDeltaCount : Boolean = false - // Mapping head names to the actual statements var schemaDeclarationGroupByHead : Map[String, List[SchemaDeclaration]] = new HashMap[String, List[SchemaDeclaration]]() var extractionRuleGroupByHead : Map[String, List[ExtractionRule]] = new HashMap[String, List[ExtractionRule]]() @@ -110,10 +108,6 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C def init() = { // generate the statements. 
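    // What the flag removed just below used to control (a sketch reconstructed
    // from this patch, not a line from the original sources): with useDeltaCount
    // set, every compiled SELECT list was suffixed with a derived count column,
    // roughly
    //   val ddCount    = (tmpCq.bodies(0).zipWithIndex map { case (x, i) => s"R${i}.dd_count" }).mkString(" * ")
    //   val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else ""
    // so INCREMENTAL and MERGE runs could track tuple multiplicities (MERGE also
    // wrapped the count in SUM(...) with a GROUP BY and deleted rows whose count
    // reached 0). Dropping the flag makes every mode emit the same plain
    // projections, as the expected-output diffs below show.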
mode = config.mode - useDeltaCount = mode match { - case ORIGINAL => false - case _ => true - } statements.foreach { case SchemaDeclaration(Attribute(r, terms, types), isQuery) => { terms.foreach { @@ -319,7 +313,6 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { case Variable(name, _, i) => s"${name} ${stmt.a.types(i)}" } if (stmt.isQuery) columnDecls = columnDecls :+ "id bigint" :+ "label boolean" - if (ss.useDeltaCount) columnDecls = columnDecls :+ "dd_count int" val indentation = " " * stmt.a.name.length val blockName = ss.resolveExtractorBlockName(stmt) schemas += s""" @@ -363,9 +356,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val name = ss.resolveName(qs.getVar(stmt.supervision)) val labelCol = s"R${index}.${name}" val headTermsStr = ( headTerms :+ "0 as id" ).mkString(", ") - val ddCount = if (ss.useDeltaCount) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else "" - inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ddCountStr} + inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, ${labelCol} AS label ${ ss.generateSQLBody(tmpCq) } """ } else if ((ss.schemaDeclarationGroupByHead contains stmt.q.head.name) && (ss.schemaDeclarationGroupByHead(stmt.q.head.name)(0).isQuery) && (stmt.q.head.name startsWith "dd_new_")) { @@ -373,9 +364,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" } val headTermsStr = ( headTerms :+ "id" ).mkString(", ") - val ddCount = if (ss.useDeltaCount) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - val ddCountStr = if (ddCount.length > 0) s", ${ddCount} AS dd_count" else "" - inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, label ${ddCountStr} + inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, label ${ ss.generateSQLBody(tmpCq) } """ } else { @@ -395,21 +384,9 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val selectStr = variableCols.mkString(", ") - var ddCount = if (ss.useDeltaCount) ( tmpCq.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - ddCount = ss.mode match { - case MERGE => s"SUM(${ddCount})" - case _ => ddCount - } - val ddCountStr = if (ddCount.length > 0) { - if (!tmpCqUseOnlyOriginal) s""", ${ddCount} AS \"dd_count\" """ else s", ${ddCount}" - } else "" - val groupBy = ss.mode match { - case MERGE => s" GROUP BY ${selectStr}" - case _ => "" - } inputQueries += s""" - SELECT ${selectStr}${ddCountStr} - ${ ss.generateSQLBody(tmpCq) }${ groupBy }""" + SELECT ${selectStr} + ${ ss.generateSQLBody(tmpCq) }""" } } } @@ -429,15 +406,10 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } val sqlCmdForInsert = if (createTable) "INSERT INTO" else "CREATE VIEW" val useAS = if (createTable) "" else " AS" - val cleanUp = ss.mode match { - case MERGE => s"""; - DELETE FROM ${stmts(0).q.head.name} WHERE dd_count = 0;""" - case _ => "" - } val extractor = s""" deepdive.extraction.extractors.${blockName} { sql: \"\"\" ${sqlCmdForCleanUp} - ${sqlCmdForInsert} ${stmts(0).q.head.name}${useAS} ${inputQueries.mkString(" UNION ALL ")}${cleanUp} + ${sqlCmdForInsert} ${stmts(0).q.head.name}${useAS} ${inputQueries.mkString(" UNION ALL ")} \"\"\" style: "sql_extractor" ${ss.generateDependenciesOfCompiledBlockFor(stmts)} @@ -489,7 +461,6 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { for (cqBody <- 
stmt.q.bodies) { // edge query val fakeBody = stmt.q.head +: cqBody - // val fakeBody = stmt.q.bodies +: List(stmt.q.head) val fakeCQ = ConjunctiveQuery(stmt.q.head, List(fakeBody)) // we will just use the fakeBody below. val index = cqBody.length + 1 @@ -510,12 +481,9 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val selectStr = (List(variableIdsStr, uwStr) flatten).mkString(", ") - val ddCount = if (ss.useDeltaCount) ( fakeCQ.bodies(0).zipWithIndex map { case(x,i) => s"R${i}.dd_count"}).mkString(" * ") else "" - val ddCountStr = if (ddCount.length > 0) s""", ${ddCount} AS \"dd_count\" """ else "" - // factor input query inputQueries += s""" - SELECT ${selectStr} ${ddCountStr} + SELECT ${selectStr} ${ ss.generateSQLBody(fakeCQ) }""" // factor function if (func.length == 0) { @@ -660,9 +628,5 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { // emit the generated code blocks foreach println - - // if (config.isIncremental) { - // // TODO emit extra extractor for moving rows of dd_delta_* to * - // } } } diff --git a/test/expected-output-test/smoke_example/compile-incremental.expected b/test/expected-output-test/smoke_example/compile-incremental.expected index 08154a6ce..b547dc8df 100644 --- a/test/expected-output-test/smoke_example/compile-incremental.expected +++ b/test/expected-output-test/smoke_example/compile-incremental.expected @@ -32,8 +32,7 @@ dd_delta_smoke.label: Boolean sql: """ DROP TABLE IF EXISTS dd_delta_friends CASCADE; CREATE TABLE dd_delta_friends(person_id bigint, - friend_id bigint, - dd_count int) + friend_id bigint) """ style: "sql_extractor" } @@ -43,8 +42,7 @@ dd_delta_smoke.label: Boolean CREATE TABLE dd_delta_smoke(person_id bigint, id bigint, - label boolean, - dd_count int) + label boolean) """ style: "sql_extractor" } @@ -53,8 +51,7 @@ dd_delta_smoke.label: Boolean sql: """ DROP TABLE IF EXISTS dd_new_person_has_cancer CASCADE; CREATE TABLE dd_new_person_has_cancer(person_id bigint, - has_cancer boolean, - dd_count int) + has_cancer boolean) """ style: "sql_extractor" } @@ -63,8 +60,7 @@ dd_delta_smoke.label: Boolean sql: """ DROP TABLE IF EXISTS dd_delta_person CASCADE; CREATE TABLE dd_delta_person(person_id bigint, - name text, - dd_count int) + name text) """ style: "sql_extractor" } @@ -73,8 +69,7 @@ dd_delta_smoke.label: Boolean sql: """ DROP TABLE IF EXISTS dd_new_person CASCADE; CREATE TABLE dd_new_person(person_id bigint, - name text, - dd_count int) + name text) """ style: "sql_extractor" } @@ -83,8 +78,7 @@ dd_delta_smoke.label: Boolean sql: """ DROP TABLE IF EXISTS dd_delta_person_has_cancer CASCADE; CREATE TABLE dd_delta_person_has_cancer(person_id bigint, - has_cancer boolean, - dd_count int) + has_cancer boolean) """ style: "sql_extractor" } @@ -94,8 +88,7 @@ dd_delta_smoke.label: Boolean CREATE TABLE dd_new_smoke(person_id bigint, id bigint, - label boolean, - dd_count int) + label boolean) """ style: "sql_extractor" } @@ -105,8 +98,7 @@ dd_delta_smoke.label: Boolean CREATE TABLE dd_delta_cancer(person_id bigint, id bigint, - label boolean, - dd_count int) + label boolean) """ style: "sql_extractor" } @@ -115,8 +107,7 @@ dd_delta_smoke.label: Boolean sql: """ DROP TABLE IF EXISTS dd_delta_person_smokes CASCADE; CREATE TABLE dd_delta_person_smokes(person_id bigint, - smokes boolean, - dd_count int) + smokes boolean) """ style: "sql_extractor" } @@ -125,8 +116,7 @@ dd_delta_smoke.label: Boolean sql: """ DROP TABLE IF EXISTS dd_new_person_smokes CASCADE; CREATE TABLE dd_new_person_smokes(person_id bigint, - smokes boolean, - 
dd_count int) + smokes boolean) """ style: "sql_extractor" } @@ -135,8 +125,7 @@ dd_delta_smoke.label: Boolean sql: """ DROP TABLE IF EXISTS dd_new_friends CASCADE; CREATE TABLE dd_new_friends(person_id bigint, - friend_id bigint, - dd_count int) + friend_id bigint) """ style: "sql_extractor" } @@ -160,7 +149,7 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_24 { sql: """ - INSERT INTO dd_delta_smoke SELECT DISTINCT R0.person_id, 0 as id, R0.smokes AS label , R0.dd_count AS dd_count + INSERT INTO dd_delta_smoke SELECT DISTINCT R0.person_id, 0 as id, R0.smokes AS label FROM dd_delta_person_smokes R0 @@ -173,10 +162,10 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_7 { sql: """ TRUNCATE dd_new_person_has_cancer; INSERT INTO dd_new_person_has_cancer - SELECT R0.person_id, R0.has_cancer, R0.dd_count + SELECT R0.person_id, R0.has_cancer FROM person_has_cancer R0 UNION ALL - SELECT R0.person_id, R0.has_cancer, R0.dd_count + SELECT R0.person_id, R0.has_cancer FROM dd_delta_person_has_cancer R0 """ @@ -188,10 +177,10 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE dd_new_person; INSERT INTO dd_new_person - SELECT R0.person_id, R0.name, R0.dd_count + SELECT R0.person_id, R0.name FROM person R0 UNION ALL - SELECT R0.person_id, R0.name, R0.dd_count + SELECT R0.person_id, R0.name FROM dd_delta_person R0 """ @@ -202,10 +191,10 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_19 { sql: """ TRUNCATE dd_new_smoke; - INSERT INTO dd_new_smoke SELECT DISTINCT R0.person_id, id, label , R0.dd_count AS dd_count + INSERT INTO dd_new_smoke SELECT DISTINCT R0.person_id, id, label FROM smoke R0 - UNION ALL SELECT DISTINCT R0.person_id, id, label , R0.dd_count AS dd_count + UNION ALL SELECT DISTINCT R0.person_id, id, label FROM dd_delta_smoke R0 @@ -217,7 +206,7 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_25 { sql: """ - INSERT INTO dd_delta_cancer SELECT DISTINCT R0.person_id, 0 as id, R0.has_cancer AS label , R0.dd_count AS dd_count + INSERT INTO dd_delta_cancer SELECT DISTINCT R0.person_id, 0 as id, R0.has_cancer AS label FROM dd_delta_person_has_cancer R0 @@ -229,10 +218,10 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_23 { sql: """ - CREATE VIEW dd_new_cancer AS SELECT DISTINCT R0.person_id, id, label , R0.dd_count AS dd_count + CREATE VIEW dd_new_cancer AS SELECT DISTINCT R0.person_id, id, label FROM cancer R0 - UNION ALL SELECT DISTINCT R0.person_id, id, label , R0.dd_count AS dd_count + UNION ALL SELECT DISTINCT R0.person_id, id, label FROM dd_delta_cancer R0 @@ -245,10 +234,10 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_11 { sql: """ TRUNCATE dd_new_person_smokes; INSERT INTO dd_new_person_smokes - SELECT R0.person_id, R0.smokes, R0.dd_count + SELECT R0.person_id, R0.smokes FROM person_smokes R0 UNION ALL - SELECT R0.person_id, R0.smokes, R0.dd_count + SELECT R0.person_id, R0.smokes FROM dd_delta_person_smokes R0 """ @@ -260,10 +249,10 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_15 { sql: """ TRUNCATE dd_new_friends; INSERT INTO dd_new_friends - SELECT R0.person_id, R0.friend_id, R0.dd_count + SELECT R0.person_id, R0.friend_id FROM friends R0 UNION ALL - SELECT R0.person_id, R0.friend_id, R0.dd_count + SELECT R0.person_id, R0.friend_id FROM dd_delta_friends R0 """ @@ -274,10 +263,10 @@ dd_delta_smoke.label: Boolean 
deepdive.inference.factors.dd_new_cancer_0 { input_query: """ - SELECT R0.id AS "dd_new_cancer.R0.id" , R1.id AS "dd_delta_smoke.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + SELECT R0.id AS "dd_new_cancer.R0.id" , R1.id AS "dd_delta_smoke.R1.id" FROM dd_new_cancer R0, dd_delta_smoke R1, person_smokes R2 WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id UNION ALL - SELECT R0.id AS "dd_new_cancer.R0.id" , R1.id AS "dd_new_smoke.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + SELECT R0.id AS "dd_new_cancer.R0.id" , R1.id AS "dd_new_smoke.R1.id" FROM dd_new_cancer R0, dd_new_smoke R1, dd_delta_person_smokes R2 WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ function: "Imply(dd_delta_smoke.R1.label, dd_new_cancer.R0.label)" diff --git a/test/expected-output-test/smoke_example/compile-materialization.expected b/test/expected-output-test/smoke_example/compile-materialization.expected index 1c050a73a..6ed2190ad 100644 --- a/test/expected-output-test/smoke_example/compile-materialization.expected +++ b/test/expected-output-test/smoke_example/compile-materialization.expected @@ -22,8 +22,7 @@ cancer.label: Boolean sql: """ DROP TABLE IF EXISTS person_smokes CASCADE; CREATE TABLE person_smokes(person_id bigint, - smokes boolean, - dd_count int) + smokes boolean) """ style: "sql_extractor" } @@ -33,8 +32,7 @@ cancer.label: Boolean CREATE TABLE smoke(person_id bigint, id bigint, - label boolean, - dd_count int) + label boolean) """ style: "sql_extractor" } @@ -44,8 +42,7 @@ cancer.label: Boolean CREATE TABLE cancer(person_id bigint, id bigint, - label boolean, - dd_count int) + label boolean) """ style: "sql_extractor" } @@ -54,8 +51,7 @@ cancer.label: Boolean sql: """ DROP TABLE IF EXISTS friends CASCADE; CREATE TABLE friends(person_id bigint, - friend_id bigint, - dd_count int) + friend_id bigint) """ style: "sql_extractor" } @@ -64,8 +60,7 @@ cancer.label: Boolean sql: """ DROP TABLE IF EXISTS person_has_cancer CASCADE; CREATE TABLE person_has_cancer(person_id bigint, - has_cancer boolean, - dd_count int) + has_cancer boolean) """ style: "sql_extractor" } @@ -74,8 +69,7 @@ cancer.label: Boolean sql: """ DROP TABLE IF EXISTS person CASCADE; CREATE TABLE person(person_id bigint, - name text, - dd_count int) + name text) """ style: "sql_extractor" } @@ -94,7 +88,7 @@ cancer.label: Boolean deepdive.extraction.extractors.extraction_rule_7 { sql: """ - INSERT INTO cancer SELECT DISTINCT R0.person_id, 0 as id, R0.has_cancer AS label , R0.dd_count AS dd_count + INSERT INTO cancer SELECT DISTINCT R0.person_id, 0 as id, R0.has_cancer AS label FROM person_has_cancer R0 @@ -106,7 +100,7 @@ cancer.label: Boolean deepdive.extraction.extractors.extraction_rule_6 { sql: """ - INSERT INTO smoke SELECT DISTINCT R0.person_id, 0 as id, R0.smokes AS label , R0.dd_count AS dd_count + INSERT INTO smoke SELECT DISTINCT R0.person_id, 0 as id, R0.smokes AS label FROM person_smokes R0 @@ -118,7 +112,7 @@ cancer.label: Boolean deepdive.inference.factors.cancer_0 { input_query: """ - SELECT R0.id AS "cancer.R0.id" , R1.id AS "smoke.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + SELECT R0.id AS "cancer.R0.id" , R1.id AS "smoke.R1.id" FROM cancer R0, smoke R1, person_smokes R2 WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ function: "Imply(smoke.R1.label, cancer.R0.label)" diff --git a/test/expected-output-test/smoke_example/compile-merge.expected b/test/expected-output-test/smoke_example/compile-merge.expected 
index 10a177f1e..487fc94a3 100644 --- a/test/expected-output-test/smoke_example/compile-merge.expected +++ b/test/expected-output-test/smoke_example/compile-merge.expected @@ -20,10 +20,9 @@ deepdive.extraction.extractors.extraction_rule_2 { sql: """ TRUNCATE person_smokes; INSERT INTO person_smokes - SELECT R0.person_id, R0.smokes, SUM(R0.dd_count) + SELECT R0.person_id, R0.smokes FROM dd_new_person_smokes R0 - GROUP BY R0.person_id, R0.smokes; - DELETE FROM person_smokes WHERE dd_count = 0; + """ style: "sql_extractor" @@ -33,10 +32,9 @@ deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE friends; INSERT INTO friends - SELECT R0.person_id, R0.friend_id, SUM(R0.dd_count) + SELECT R0.person_id, R0.friend_id FROM dd_new_friends R0 - GROUP BY R0.person_id, R0.friend_id; - DELETE FROM friends WHERE dd_count = 0; + """ style: "sql_extractor" @@ -46,10 +44,9 @@ deepdive.extraction.extractors.extraction_rule_1 { sql: """ TRUNCATE person_has_cancer; INSERT INTO person_has_cancer - SELECT R0.person_id, R0.has_cancer, SUM(R0.dd_count) + SELECT R0.person_id, R0.has_cancer FROM dd_new_person_has_cancer R0 - GROUP BY R0.person_id, R0.has_cancer; - DELETE FROM person_has_cancer WHERE dd_count = 0; + """ style: "sql_extractor" @@ -59,10 +56,9 @@ deepdive.extraction.extractors.extraction_rule_0 { sql: """ TRUNCATE person; INSERT INTO person - SELECT R0.person_id, R0.name, SUM(R0.dd_count) + SELECT R0.person_id, R0.name FROM dd_new_person R0 - GROUP BY R0.person_id, R0.name; - DELETE FROM person WHERE dd_count = 0; + """ style: "sql_extractor" diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected index bcbd12095..beee5b073 100644 --- a/test/expected-output-test/spouse_example/compile-incremental.expected +++ b/test/expected-output-test/spouse_example/compile-incremental.expected @@ -27,8 +27,7 @@ dd_new_has_spouse.label: Boolean sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; CREATE TABLE dd_delta_articles(article_id text, - text text, - dd_count int) + text text) """ style: "sql_extractor" } @@ -40,8 +39,7 @@ dd_new_has_spouse.label: Boolean start_position int, length int, text text, - mention_id text, - dd_count int) + mention_id text) """ style: "sql_extractor" } @@ -50,8 +48,7 @@ dd_new_has_spouse.label: Boolean sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; CREATE TABLE dd_delta_has_spouse_features(relation_id text, - feature text, - dd_count int) + feature text) """ style: "sql_extractor" } @@ -60,8 +57,7 @@ dd_new_has_spouse.label: Boolean sql: """ DROP TABLE IF EXISTS dd_new_articles CASCADE; CREATE TABLE dd_new_articles(article_id text, - text text, - dd_count int) + text text) """ style: "sql_extractor" } @@ -74,8 +70,7 @@ dd_new_has_spouse.label: Boolean sentence_id text, description text, relation_id text, - is_true boolean, - dd_count int) + is_true boolean) """ style: "sql_extractor" } @@ -84,8 +79,7 @@ dd_new_has_spouse.label: Boolean sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_features CASCADE; CREATE TABLE dd_new_has_spouse_features(relation_id text, - feature text, - dd_count int) + feature text) """ style: "sql_extractor" } @@ -101,8 +95,7 @@ dd_new_has_spouse.label: Boolean dependencies text, ner_tags text, sentence_offset int, - sentence_id text, - dd_count int) + sentence_id text) """ style: "sql_extractor" } @@ -114,8 +107,7 @@ dd_new_has_spouse.label: Boolean start_position int, length int, text text, - mention_id text, - dd_count 
int) + mention_id text) """ style: "sql_extractor" } @@ -128,8 +120,7 @@ dd_new_has_spouse.label: Boolean sentence_id text, description text, relation_id text, - is_true boolean, - dd_count int) + is_true boolean) """ style: "sql_extractor" } @@ -145,8 +136,7 @@ dd_new_has_spouse.label: Boolean dependencies text, ner_tags text, sentence_offset int, - sentence_id text, - dd_count int) + sentence_id text) """ style: "sql_extractor" } @@ -156,8 +146,7 @@ dd_new_has_spouse.label: Boolean CREATE TABLE dd_delta_has_spouse(relation_id text, id bigint, - label boolean, - dd_count int) + label boolean) """ style: "sql_extractor" } @@ -182,16 +171,16 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_31 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS 
"people_mentions.R3.length" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ @@ -203,10 +192,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_11 { sql: """ TRUNCATE dd_new_people_mentions; INSERT INTO dd_new_people_mentions - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM people_mentions R0 UNION ALL - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM dd_delta_people_mentions R0 """ @@ -218,10 +207,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE dd_new_articles; INSERT INTO dd_new_articles - SELECT R0.article_id, R0.text, R0.dd_count + SELECT R0.article_id, R0.text FROM articles R0 UNION ALL - SELECT R0.article_id, R0.text, R0.dd_count + SELECT R0.article_id, R0.text FROM dd_delta_articles R0 """ @@ -233,7 +222,7 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_25 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS - SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" + SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" FROM dd_delta_sentences R0 """ @@ -245,10 +234,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_15 { sql: """ TRUNCATE dd_new_has_spouse_candidates; INSERT INTO dd_new_has_spouse_candidates - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM has_spouse_candidates R0 UNION ALL - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, 
R0.description, R0.relation_id, R0.is_true FROM dd_delta_has_spouse_candidates R0 """ @@ -260,10 +249,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_19 { sql: """ TRUNCATE dd_new_has_spouse_features; INSERT INTO dd_new_has_spouse_features - SELECT R0.relation_id, R0.feature, R0.dd_count + SELECT R0.relation_id, R0.feature FROM has_spouse_features R0 UNION ALL - SELECT R0.relation_id, R0.feature, R0.dd_count + SELECT R0.relation_id, R0.feature FROM dd_delta_has_spouse_features R0 """ @@ -275,10 +264,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_7 { sql: """ TRUNCATE dd_new_sentences; INSERT INTO dd_new_sentences - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM sentences R0 UNION ALL - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM dd_delta_sentences R0 """ @@ -290,10 +279,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_28 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS - SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" FROM dd_delta_people_mentions R0, people_mentions R1 WHERE R1.sentence_id = R0.sentence_id UNION ALL - SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 WHERE R1.sentence_id = R0.sentence_id """ @@ -304,7 +293,7 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_33 { sql: """ - INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count + INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label FROM dd_delta_has_spouse_candidates R0 @@ -316,10 +305,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_23 { sql: """ - CREATE VIEW dd_new_has_spouse AS SELECT DISTINCT R0.relation_id, id, label , R0.dd_count AS dd_count + CREATE VIEW dd_new_has_spouse AS SELECT 
DISTINCT R0.relation_id, id, label FROM has_spouse R0 - UNION ALL SELECT DISTINCT R0.relation_id, id, label , R0.dd_count AS dd_count + UNION ALL SELECT DISTINCT R0.relation_id, id, label FROM dd_delta_has_spouse R0 @@ -361,10 +350,10 @@ dd_new_has_spouse.label: Boolean deepdive.inference.factors.dd_new_has_spouse_0 { input_query: """ - SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM dd_new_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION ALL - SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" FROM dd_new_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(dd_new_has_spouse.R0.label)" diff --git a/test/expected-output-test/spouse_example/compile-materialization.expected b/test/expected-output-test/spouse_example/compile-materialization.expected index b3e0309c9..706b2bbac 100644 --- a/test/expected-output-test/spouse_example/compile-materialization.expected +++ b/test/expected-output-test/spouse_example/compile-materialization.expected @@ -28,8 +28,7 @@ dependencies text, ner_tags text, sentence_offset int, - sentence_id text, - dd_count int) + sentence_id text) """ style: "sql_extractor" } @@ -42,8 +41,7 @@ sentence_id text, description text, relation_id text, - is_true boolean, - dd_count int) + is_true boolean) """ style: "sql_extractor" } @@ -53,8 +51,7 @@ CREATE TABLE has_spouse(relation_id text, id bigint, - label boolean, - dd_count int) + label boolean) """ style: "sql_extractor" } @@ -63,8 +60,7 @@ sql: """ DROP TABLE IF EXISTS articles CASCADE; CREATE TABLE articles(article_id text, - text text, - dd_count int) + text text) """ style: "sql_extractor" } @@ -73,8 +69,7 @@ sql: """ DROP TABLE IF EXISTS has_spouse_features CASCADE; CREATE TABLE has_spouse_features(relation_id text, - feature text, - dd_count int) + feature text) """ style: "sql_extractor" } @@ -86,8 +81,7 @@ start_position int, length int, text text, - mention_id text, - dd_count int) + mention_id text) """ style: "sql_extractor" } @@ -106,7 +100,7 @@ deepdive.extraction.extractors.extraction_rule_15 { sql: """ - INSERT INTO has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count + INSERT INTO has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label FROM has_spouse_candidates R0 @@ -119,7 +113,7 @@ deepdive.extraction.extractors.extraction_rule_7 { sql: """ DROP VIEW IF EXISTS ext_people_input; CREATE VIEW ext_people_input AS - SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" , R0.dd_count AS "dd_count" + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" FROM sentences R0 """ @@ -131,7 +125,7 @@ deepdive.extraction.extractors.extraction_rule_13 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; CREATE VIEW ext_has_spouse_features_input AS - SELECT R0.words AS 
"sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ @@ -143,7 +137,7 @@ deepdive.extraction.extractors.extraction_rule_10 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_input; CREATE VIEW ext_has_spouse_input AS - SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" FROM people_mentions R0, people_mentions R1 WHERE R1.sentence_id = R0.sentence_id """ @@ -184,7 +178,7 @@ deepdive.inference.factors.has_spouse_0 { input_query: """ - SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(has_spouse.R0.label)" diff --git a/test/expected-output-test/spouse_example/compile-merge.expected b/test/expected-output-test/spouse_example/compile-merge.expected index b0dfc4a2f..9a972542f 100644 --- a/test/expected-output-test/spouse_example/compile-merge.expected +++ b/test/expected-output-test/spouse_example/compile-merge.expected @@ -20,10 +20,9 @@ deepdive.extraction.extractors.extraction_rule_1 { sql: """ TRUNCATE sentences; INSERT INTO sentences - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, SUM(R0.dd_count) + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM dd_new_sentences R0 - GROUP BY R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id; - DELETE FROM sentences WHERE dd_count = 0; + """ style: "sql_extractor" @@ -33,10 +32,9 @@ deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE has_spouse_candidates; INSERT INTO has_spouse_candidates - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, SUM(R0.dd_count) + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM dd_new_has_spouse_candidates R0 - GROUP 
BY R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true; - DELETE FROM has_spouse_candidates WHERE dd_count = 0; + """ style: "sql_extractor" @@ -46,10 +44,9 @@ deepdive.extraction.extractors.extraction_rule_0 { sql: """ TRUNCATE articles; INSERT INTO articles - SELECT R0.article_id, R0.text, SUM(R0.dd_count) + SELECT R0.article_id, R0.text FROM dd_new_articles R0 - GROUP BY R0.article_id, R0.text; - DELETE FROM articles WHERE dd_count = 0; + """ style: "sql_extractor" @@ -59,10 +56,9 @@ deepdive.extraction.extractors.extraction_rule_4 { sql: """ TRUNCATE has_spouse_features; INSERT INTO has_spouse_features - SELECT R0.relation_id, R0.feature, SUM(R0.dd_count) + SELECT R0.relation_id, R0.feature FROM dd_new_has_spouse_features R0 - GROUP BY R0.relation_id, R0.feature; - DELETE FROM has_spouse_features WHERE dd_count = 0; + """ style: "sql_extractor" @@ -72,10 +68,9 @@ deepdive.extraction.extractors.extraction_rule_2 { sql: """ TRUNCATE people_mentions; INSERT INTO people_mentions - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, SUM(R0.dd_count) + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM dd_new_people_mentions R0 - GROUP BY R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id; - DELETE FROM people_mentions WHERE dd_count = 0; + """ style: "sql_extractor" diff --git a/test/expected-output-test/spouse_example/compile.expected b/test/expected-output-test/spouse_example/compile.expected index ee490fafe..87d478af8 100644 --- a/test/expected-output-test/spouse_example/compile.expected +++ b/test/expected-output-test/spouse_example/compile.expected @@ -100,7 +100,7 @@ deepdive.extraction.extractors.extraction_rule_15 { sql: """ - INSERT INTO has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label + INSERT INTO has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label FROM has_spouse_candidates R0 @@ -178,7 +178,7 @@ deepdive.inference.factors.has_spouse_0 { input_query: """ - SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" + SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(has_spouse.R0.label)" diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index 028b2ae9a..ca8c73cb5 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -27,8 +27,7 @@ dd_new_has_spouse.label: Boolean sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; CREATE TABLE dd_delta_articles(article_id text, - text text, - dd_count int) + text text) """ style: "sql_extractor" } @@ -40,8 +39,7 @@ dd_new_has_spouse.label: Boolean start_position int, length int, text text, - mention_id text, - dd_count int) + mention_id text) """ style: "sql_extractor" } @@ -50,8 +48,7 @@ dd_new_has_spouse.label: Boolean sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; CREATE TABLE dd_delta_has_spouse_features(relation_id text, - feature text, - dd_count int) + feature text) """ style: "sql_extractor" } @@ -60,8 +57,7 @@ dd_new_has_spouse.label: Boolean sql: """ DROP TABLE IF EXISTS dd_new_articles 
CASCADE; CREATE TABLE dd_new_articles(article_id text, - text text, - dd_count int) + text text) """ style: "sql_extractor" } @@ -74,8 +70,7 @@ dd_new_has_spouse.label: Boolean sentence_id text, description text, relation_id text, - is_true boolean, - dd_count int) + is_true boolean) """ style: "sql_extractor" } @@ -84,8 +79,7 @@ dd_new_has_spouse.label: Boolean sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_features CASCADE; CREATE TABLE dd_new_has_spouse_features(relation_id text, - feature text, - dd_count int) + feature text) """ style: "sql_extractor" } @@ -101,8 +95,7 @@ dd_new_has_spouse.label: Boolean dependencies text, ner_tags text, sentence_offset int, - sentence_id text, - dd_count int) + sentence_id text) """ style: "sql_extractor" } @@ -114,8 +107,7 @@ dd_new_has_spouse.label: Boolean start_position int, length int, text text, - mention_id text, - dd_count int) + mention_id text) """ style: "sql_extractor" } @@ -128,8 +120,7 @@ dd_new_has_spouse.label: Boolean sentence_id text, description text, relation_id text, - is_true boolean, - dd_count int) + is_true boolean) """ style: "sql_extractor" } @@ -145,8 +136,7 @@ dd_new_has_spouse.label: Boolean dependencies text, ner_tags text, sentence_offset int, - sentence_id text, - dd_count int) + sentence_id text) """ style: "sql_extractor" } @@ -156,8 +146,7 @@ dd_new_has_spouse.label: Boolean CREATE TABLE dd_delta_has_spouse(relation_id text, id bigint, - label boolean, - dd_count int) + label boolean) """ style: "sql_extractor" } @@ -182,16 +171,16 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_31 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE 
R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ @@ -203,10 +192,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_11 { sql: """ TRUNCATE dd_new_people_mentions; INSERT INTO dd_new_people_mentions - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM people_mentions R0 UNION ALL - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM dd_delta_people_mentions R0 """ @@ -218,10 +207,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE dd_new_articles; INSERT INTO dd_new_articles - SELECT R0.article_id, R0.text, R0.dd_count + SELECT R0.article_id, R0.text FROM articles R0 UNION ALL - SELECT R0.article_id, R0.text, R0.dd_count + SELECT R0.article_id, R0.text FROM dd_delta_articles R0 """ @@ -233,10 +222,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_25 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS - SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" 
, R0.ner_tags AS "sentences.R0.ner_tags" , R0.dd_count AS "dd_count" + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" FROM sentences R0 UNION ALL - SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" + SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" FROM dd_delta_sentences R0 """ @@ -248,10 +237,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_15 { sql: """ TRUNCATE dd_new_has_spouse_candidates; INSERT INTO dd_new_has_spouse_candidates - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM has_spouse_candidates R0 UNION ALL - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM dd_delta_has_spouse_candidates R0 """ @@ -263,10 +252,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_19 { sql: """ TRUNCATE dd_new_has_spouse_features; INSERT INTO dd_new_has_spouse_features - SELECT R0.relation_id, R0.feature, R0.dd_count + SELECT R0.relation_id, R0.feature FROM has_spouse_features R0 UNION ALL - SELECT R0.relation_id, R0.feature, R0.dd_count + SELECT R0.relation_id, R0.feature FROM dd_delta_has_spouse_features R0 """ @@ -278,10 +267,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_7 { sql: """ TRUNCATE dd_new_sentences; INSERT INTO dd_new_sentences - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM sentences R0 UNION ALL - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM dd_delta_sentences R0 """ @@ -293,10 +282,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_28 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS - SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" FROM dd_delta_people_mentions R0, people_mentions R1 WHERE R1.sentence_id = R0.sentence_id UNION ALL - SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , 
R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 WHERE R1.sentence_id = R0.sentence_id """ @@ -307,7 +296,7 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_33 { sql: """ - INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count + INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label FROM dd_delta_has_spouse_candidates R0 @@ -319,10 +308,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_23 { sql: """ - CREATE VIEW dd_new_has_spouse AS SELECT DISTINCT R0.relation_id, id, label , R0.dd_count AS dd_count + CREATE VIEW dd_new_has_spouse AS SELECT DISTINCT R0.relation_id, id, label FROM has_spouse R0 - UNION ALL SELECT DISTINCT R0.relation_id, id, label , R0.dd_count AS dd_count + UNION ALL SELECT DISTINCT R0.relation_id, id, label FROM dd_delta_has_spouse R0 @@ -364,10 +353,10 @@ dd_new_has_spouse.label: Boolean deepdive.inference.factors.dd_new_has_spouse_0 { input_query: """ - SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM dd_new_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION ALL - SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" FROM dd_new_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Linear(dd_new_has_spouse.R0.label)" diff --git a/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected b/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected index d6ed9c2e6..3baea1486 100644 --- a/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected @@ -27,8 +27,7 @@ dd_new_has_spouse.label: Boolean sql: """ DROP TABLE IF EXISTS dd_delta_articles CASCADE; CREATE TABLE dd_delta_articles(article_id text, - text text, - dd_count int) + text text) """ style: "sql_extractor" } @@ -40,8 +39,7 @@ dd_new_has_spouse.label: Boolean start_position int, length int, text text, - mention_id text, - dd_count int) + mention_id text) """ style: "sql_extractor" } @@ -50,8 +48,7 @@ dd_new_has_spouse.label: Boolean sql: """ DROP TABLE IF EXISTS dd_delta_has_spouse_features CASCADE; CREATE TABLE dd_delta_has_spouse_features(relation_id text, - feature text, - dd_count int) + feature text) """ style: "sql_extractor" } @@ -60,8 +57,7 @@ dd_new_has_spouse.label: Boolean sql: """ DROP 
TABLE IF EXISTS dd_new_articles CASCADE; CREATE TABLE dd_new_articles(article_id text, - text text, - dd_count int) + text text) """ style: "sql_extractor" } @@ -74,8 +70,7 @@ dd_new_has_spouse.label: Boolean sentence_id text, description text, relation_id text, - is_true boolean, - dd_count int) + is_true boolean) """ style: "sql_extractor" } @@ -84,8 +79,7 @@ dd_new_has_spouse.label: Boolean sql: """ DROP TABLE IF EXISTS dd_new_has_spouse_features CASCADE; CREATE TABLE dd_new_has_spouse_features(relation_id text, - feature text, - dd_count int) + feature text) """ style: "sql_extractor" } @@ -101,8 +95,7 @@ dd_new_has_spouse.label: Boolean dependencies text, ner_tags text, sentence_offset int, - sentence_id text, - dd_count int) + sentence_id text) """ style: "sql_extractor" } @@ -114,8 +107,7 @@ dd_new_has_spouse.label: Boolean start_position int, length int, text text, - mention_id text, - dd_count int) + mention_id text) """ style: "sql_extractor" } @@ -128,8 +120,7 @@ dd_new_has_spouse.label: Boolean sentence_id text, description text, relation_id text, - is_true boolean, - dd_count int) + is_true boolean) """ style: "sql_extractor" } @@ -145,8 +136,7 @@ dd_new_has_spouse.label: Boolean dependencies text, ner_tags text, sentence_offset int, - sentence_id text, - dd_count int) + sentence_id text) """ style: "sql_extractor" } @@ -156,8 +146,7 @@ dd_new_has_spouse.label: Boolean CREATE TABLE dd_delta_has_spouse(relation_id text, id bigint, - label boolean, - dd_count int) + label boolean) """ style: "sql_extractor" } @@ -182,16 +171,16 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_31 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, 
people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ @@ -203,10 +192,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_11 { sql: """ TRUNCATE dd_new_people_mentions; INSERT INTO dd_new_people_mentions - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM people_mentions R0 UNION ALL - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id, R0.dd_count + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM dd_delta_people_mentions R0 """ @@ -218,10 +207,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE dd_new_articles; INSERT INTO dd_new_articles - SELECT R0.article_id, R0.text, R0.dd_count + SELECT R0.article_id, R0.text FROM articles R0 UNION ALL - SELECT R0.article_id, R0.text, R0.dd_count + SELECT R0.article_id, R0.text FROM dd_delta_articles R0 """ @@ -233,7 +222,7 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_25 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS - SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , 
R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" , R0.dd_count AS "dd_count" + SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" FROM dd_delta_sentences R0 """ @@ -245,10 +234,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_15 { sql: """ TRUNCATE dd_new_has_spouse_candidates; INSERT INTO dd_new_has_spouse_candidates - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM has_spouse_candidates R0 UNION ALL - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true, R0.dd_count + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM dd_delta_has_spouse_candidates R0 """ @@ -260,10 +249,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_19 { sql: """ TRUNCATE dd_new_has_spouse_features; INSERT INTO dd_new_has_spouse_features - SELECT R0.relation_id, R0.feature, R0.dd_count + SELECT R0.relation_id, R0.feature FROM has_spouse_features R0 UNION ALL - SELECT R0.relation_id, R0.feature, R0.dd_count + SELECT R0.relation_id, R0.feature FROM dd_delta_has_spouse_features R0 """ @@ -275,10 +264,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_7 { sql: """ TRUNCATE dd_new_sentences; INSERT INTO dd_new_sentences - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM sentences R0 UNION ALL - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id, R0.dd_count + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM dd_delta_sentences R0 """ @@ -290,10 +279,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_28 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS - SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" FROM dd_delta_people_mentions R0, people_mentions R1 WHERE R1.sentence_id = R0.sentence_id UNION ALL - SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" , R0.dd_count * R1.dd_count AS "dd_count" + SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS 
"dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 WHERE R1.sentence_id = R0.sentence_id """ @@ -304,7 +293,7 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_33 { sql: """ - INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label , R0.dd_count AS dd_count + INSERT INTO dd_delta_has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label FROM dd_delta_has_spouse_candidates R0 @@ -316,10 +305,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_23 { sql: """ - CREATE VIEW dd_new_has_spouse AS SELECT DISTINCT R0.relation_id, id, label , R0.dd_count AS dd_count + CREATE VIEW dd_new_has_spouse AS SELECT DISTINCT R0.relation_id, id, label FROM has_spouse R0 - UNION ALL SELECT DISTINCT R0.relation_id, id, label , R0.dd_count AS dd_count + UNION ALL SELECT DISTINCT R0.relation_id, id, label FROM dd_delta_has_spouse R0 @@ -361,10 +350,10 @@ dd_new_has_spouse.label: Boolean deepdive.inference.factors.dd_new_has_spouse_0 { input_query: """ - SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM dd_new_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION ALL - SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" , R0.dd_count * R1.dd_count * R2.dd_count AS "dd_count" + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" FROM dd_new_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(dd_new_has_spouse.R0.label)" @@ -374,7 +363,7 @@ dd_new_has_spouse.label: Boolean deepdive.inference.factors.dd_new_has_spouse_1 { input_query: """ - SELECT R0.id AS "dd_new_has_spouse.R0.id" , R1.id AS "dd_new_has_spouse.R1.id" , R0.dd_count * R1.dd_count * R2.dd_count * R3.dd_count AS "dd_count" + SELECT R0.id AS "dd_new_has_spouse.R0.id" , R1.id AS "dd_new_has_spouse.R1.id" FROM dd_new_has_spouse R0, dd_new_has_spouse R1, dd_new_has_spouse_candidates R2, dd_new_has_spouse_candidates R3 WHERE R2.relation_id = R0.relation_id AND R3.person1_id = R2.person2_id AND R3.person2_id = R2.person1_id AND R3.relation_id = R1.relation_id """ function: "Imply(dd_new_has_spouse.R1.label, dd_new_has_spouse.R0.label)" diff --git a/test/expected-output-test/spouse_example_new_inference/compile.expected b/test/expected-output-test/spouse_example_new_inference/compile.expected index d516477b9..d184ea219 100644 --- a/test/expected-output-test/spouse_example_new_inference/compile.expected +++ b/test/expected-output-test/spouse_example_new_inference/compile.expected @@ -100,7 +100,7 @@ deepdive.extraction.extractors.extraction_rule_15 { sql: """ - INSERT INTO has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label + INSERT INTO has_spouse SELECT DISTINCT R0.relation_id, 0 as id, R0.is_true AS label FROM has_spouse_candidates R0 @@ -178,7 +178,7 @@ deepdive.inference.factors.has_spouse_0 { input_query: """ - SELECT 
R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" + SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(has_spouse.R0.label)" @@ -188,7 +188,7 @@ deepdive.inference.factors.has_spouse_1 { input_query: """ - SELECT R0.id AS "has_spouse.R0.id" , R1.id AS "has_spouse.R1.id" + SELECT R0.id AS "has_spouse.R0.id" , R1.id AS "has_spouse.R1.id" FROM has_spouse R0, has_spouse R1, has_spouse_candidates R2, has_spouse_candidates R3 WHERE R2.relation_id = R0.relation_id AND R3.person1_id = R2.person2_id AND R3.person2_id = R2.person1_id AND R3.relation_id = R1.relation_id """ function: "Imply(has_spouse.R1.label, has_spouse.R0.label)" From f390c460b71389b4c7716e8d0e0f0a43bb756e3f Mon Sep 17 00:00:00 2001 From: feiranwang Date: Wed, 1 Jul 2015 16:02:25 -0700 Subject: [PATCH 135/347] update smoke example --- .../compile-incremental.expected | 32 ++++++++++--------- .../compile-materialization.expected | 14 ++++++-- .../smoke_example/compile.expected | 14 ++++++-- .../smoke_example/input.ddl | 5 ++- .../smoke_example/print-incremental.expected | 10 +++++- .../smoke_example/print.expected | 8 ++++- 6 files changed, 61 insertions(+), 22 deletions(-) diff --git a/test/expected-output-test/smoke_example/compile-incremental.expected b/test/expected-output-test/smoke_example/compile-incremental.expected index b547dc8df..042e044f4 100644 --- a/test/expected-output-test/smoke_example/compile-incremental.expected +++ b/test/expected-output-test/smoke_example/compile-incremental.expected @@ -83,16 +83,6 @@ dd_delta_smoke.label: Boolean style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_18 { - sql: """ DROP TABLE IF EXISTS dd_new_smoke CASCADE; - CREATE TABLE - dd_new_smoke(person_id bigint, - id bigint, - label boolean) - """ - style: "sql_extractor" - } - deepdive.extraction.extractors.extraction_rule_21 { sql: """ DROP TABLE IF EXISTS dd_delta_cancer CASCADE; CREATE TABLE @@ -138,7 +128,6 @@ dd_delta_smoke.label: Boolean TRUNCATE dd_delta_person; TRUNCATE dd_new_person; TRUNCATE dd_delta_person_has_cancer; - TRUNCATE dd_new_smoke; TRUNCATE dd_delta_cancer; TRUNCATE dd_delta_person_smokes; TRUNCATE dd_new_person_smokes; @@ -190,8 +179,8 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_19 { - sql: """ TRUNCATE dd_new_smoke; - INSERT INTO dd_new_smoke SELECT DISTINCT R0.person_id, id, label + sql: """ + CREATE VIEW dd_new_smoke AS SELECT DISTINCT R0.person_id, id, label FROM smoke R0 UNION ALL SELECT DISTINCT R0.person_id, id, label @@ -270,12 +259,25 @@ dd_delta_smoke.label: Boolean FROM dd_new_cancer R0, dd_new_smoke R1, dd_delta_person_smokes R2 WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ function: "Imply(dd_delta_smoke.R1.label, dd_new_cancer.R0.label)" - weight: "3.0" + weight: "0.5" + } + + + deepdive.inference.factors.dd_new_smoke_1 { + input_query: """ + SELECT R0.id AS "dd_new_smoke.R0.id" , R1.id AS "dd_delta_smoke.R1.id" + FROM dd_new_smoke R0, dd_delta_smoke R1, friend R2 + WHERE R2.pid1 = R1.person_id AND R2.pid = R0.person_id UNION ALL + SELECT R0.id AS "dd_new_smoke.R0.id" , R1.id AS "dd_new_smoke.R1.id" + FROM dd_new_smoke R0, dd_new_smoke R1, dd_delta_friend R2 + WHERE R2.pid1 = R1.person_id AND R2.pid = R0.person_id """ + function: "Imply(dd_delta_smoke.R1.label, dd_new_smoke.R0.label)" + weight: 
"0.4" } deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_13, extraction_rule_17, extraction_rule_6, extraction_rule_1, extraction_rule_2, extraction_rule_5, extraction_rule_18, extraction_rule_21, extraction_rule_22, extraction_rule_9, extraction_rule_10, extraction_rule_14] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_25, extraction_rule_19, extraction_rule_3, extraction_rule_23, extraction_rule_11, extraction_rule_24, extraction_rule_15] -deepdive.pipeline.pipelines.inference: [dd_new_cancer_0] +deepdive.pipeline.pipelines.inference: [dd_new_cancer_0, dd_new_smoke_1] deepdive.pipeline.pipelines.cleanup: [cleanup] deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/smoke_example/compile-materialization.expected b/test/expected-output-test/smoke_example/compile-materialization.expected index 6ed2190ad..295dfd0a7 100644 --- a/test/expected-output-test/smoke_example/compile-materialization.expected +++ b/test/expected-output-test/smoke_example/compile-materialization.expected @@ -116,11 +116,21 @@ cancer.label: Boolean FROM cancer R0, smoke R1, person_smokes R2 WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ function: "Imply(smoke.R1.label, cancer.R0.label)" - weight: "3.0" + weight: "0.5" + } + + + deepdive.inference.factors.smoke_1 { + input_query: """ + SELECT R0.id AS "smoke.R0.id" , R1.id AS "smoke.R1.id" + FROM smoke R0, smoke R1, friend R2 + WHERE R2.pid1 = R1.person_id AND R2.pid = R0.person_id """ + function: "Imply(smoke.R1.label, smoke.R0.label)" + weight: "0.4" } deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_2, extraction_rule_4, extraction_rule_5, extraction_rule_3, extraction_rule_1, extraction_rule_0] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_6] -deepdive.pipeline.pipelines.inference: [cancer_0] +deepdive.pipeline.pipelines.inference: [cancer_0, smoke_1] deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/smoke_example/compile.expected b/test/expected-output-test/smoke_example/compile.expected index c5147d90d..60b6e3112 100644 --- a/test/expected-output-test/smoke_example/compile.expected +++ b/test/expected-output-test/smoke_example/compile.expected @@ -116,10 +116,20 @@ cancer.label: Boolean FROM cancer R0, smoke R1, person_smokes R2 WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ function: "Imply(smoke.R1.label, cancer.R0.label)" - weight: "3.0" + weight: "0.5" + } + + + deepdive.inference.factors.smoke_1 { + input_query: """ + SELECT R0.id AS "smoke.R0.id" , R1.id AS "smoke.R1.id" + FROM smoke R0, smoke R1, friend R2 + WHERE R2.pid1 = R1.person_id AND R2.pid = R0.person_id """ + function: "Imply(smoke.R1.label, smoke.R0.label)" + weight: "0.4" } deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_2, extraction_rule_4, extraction_rule_5, extraction_rule_3, extraction_rule_1, extraction_rule_0] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_6] -deepdive.pipeline.pipelines.inference: [cancer_0] +deepdive.pipeline.pipelines.inference: [cancer_0, smoke_1] diff --git a/test/expected-output-test/smoke_example/input.ddl b/test/expected-output-test/smoke_example/input.ddl index 10da92bed..25e5ee7cf 100644 --- a/test/expected-output-test/smoke_example/input.ddl +++ b/test/expected-output-test/smoke_example/input.ddl @@ -30,4 +30,7 @@ smoke(pid) :- person_smokes(pid, l) label = l. 
cancer(pid) :- person_has_cancer(pid, l) label = l. cancer(pid) :- smoke(pid), person_smokes(pid, l) - weight = 3.0. + weight = 0.5. + +smoke(pid) :- smoke(pid1), friend(pid1, pid) + weight = 0.4. \ No newline at end of file diff --git a/test/expected-output-test/smoke_example/print-incremental.expected b/test/expected-output-test/smoke_example/print-incremental.expected index 5046bbaf3..7a52fb60d 100644 --- a/test/expected-output-test/smoke_example/print-incremental.expected +++ b/test/expected-output-test/smoke_example/print-incremental.expected @@ -83,6 +83,14 @@ dd_new_cancer(pid) :- person_smokes(pid, l); dd_new_smoke(pid), dd_delta_person_smokes(pid, l) - weight = 3.0 + weight = 0.5 + semantics = Imply. + +dd_new_smoke(pid) :- + dd_delta_smoke(pid1), + friend(pid1, pid); + dd_new_smoke(pid1), + dd_delta_friend(pid1, pid) + weight = 0.4 semantics = Imply. diff --git a/test/expected-output-test/smoke_example/print.expected b/test/expected-output-test/smoke_example/print.expected index 48f4d4b4f..bef8589bc 100644 --- a/test/expected-output-test/smoke_example/print.expected +++ b/test/expected-output-test/smoke_example/print.expected @@ -25,6 +25,12 @@ cancer(pid) :- cancer(pid) :- smoke(pid), person_smokes(pid, l) - weight = 3.0 + weight = 0.5 + semantics = Imply. + +smoke(pid) :- + smoke(pid1), + friend(pid1, pid) + weight = 0.4 semantics = Imply. From fade27892182f936d5ef5e2d3c9f449808cf75f5 Mon Sep 17 00:00:00 2001 From: feiranwang Date: Wed, 1 Jul 2015 16:06:57 -0700 Subject: [PATCH 136/347] add ocr example --- .../ocr_example/compile-incremental.expected | 246 ++++++++++++++++++ .../compile-materialization.expected | 128 +++++++++ .../ocr_example/compile-merge.expected | 56 ++++ .../ocr_example/compile.expected | 127 +++++++++ .../ocr_example/input.ddl | 25 ++ .../ocr_example/print-incremental.expected | 83 ++++++ .../ocr_example/print.expected | 33 +++ 7 files changed, 698 insertions(+) create mode 100644 test/expected-output-test/ocr_example/compile-incremental.expected create mode 100644 test/expected-output-test/ocr_example/compile-materialization.expected create mode 100644 test/expected-output-test/ocr_example/compile-merge.expected create mode 100644 test/expected-output-test/ocr_example/compile.expected create mode 100644 test/expected-output-test/ocr_example/input.ddl create mode 100644 test/expected-output-test/ocr_example/print-incremental.expected create mode 100644 test/expected-output-test/ocr_example/print.expected diff --git a/test/expected-output-test/ocr_example/compile-incremental.expected b/test/expected-output-test/ocr_example/compile-incremental.expected new file mode 100644 index 000000000..c7da34686 --- /dev/null +++ b/test/expected-output-test/ocr_example/compile-incremental.expected @@ -0,0 +1,246 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: INCREMENTAL + } + + + deepdive.schema.keys { + dd_new_q1 : [wid] + dd_delta_q2 : [wid] + dd_delta_q1 : [wid] + dd_new_q2 : [wid] + } + + deepdive.schema.variables { + dd_delta_q1.label: Boolean +dd_new_q2.label: Boolean +dd_new_q1.label: Boolean +q2.label: Boolean +dd_delta_q2.label: Boolean +q1.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_10 { + sql: """ DROP TABLE IF EXISTS dd_new_label2 CASCADE; + CREATE TABLE + dd_new_label2(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + 
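+  # A hedged sketch of the incremental scheme the extractors in this file
+  # implement: for every base relation R, dd_delta_R holds tuples that arrived
+  # since the last run, and dd_new_R is R merged with its delta. R here is a
+  # placeholder for label1, label2, features, etc.; the generated merge rules
+  # below spell out the column lists explicitly, but each follows roughly:
+  #
+  #   TRUNCATE dd_new_R;
+  #   INSERT INTO dd_new_R
+  #   SELECT * FROM R
+  #   UNION ALL
+  #   SELECT * FROM dd_delta_R;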
deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS dd_delta_label1 CASCADE; + CREATE TABLE + dd_delta_label1(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ DROP TABLE IF EXISTS dd_new_label1 CASCADE; + CREATE TABLE + dd_new_label1(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS dd_delta_features CASCADE; + CREATE TABLE + dd_delta_features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_17 { + sql: """ DROP TABLE IF EXISTS dd_delta_q2 CASCADE; + CREATE TABLE + dd_delta_q2(wid INT, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_9 { + sql: """ DROP TABLE IF EXISTS dd_delta_label2 CASCADE; + CREATE TABLE + dd_delta_label2(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS dd_new_features CASCADE; + CREATE TABLE + dd_new_features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP TABLE IF EXISTS dd_delta_q1 CASCADE; + CREATE TABLE + dd_delta_q1(wid INT, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE dd_new_label2; + TRUNCATE dd_delta_label1; + TRUNCATE dd_new_label1; + TRUNCATE dd_delta_features; + TRUNCATE dd_delta_q2; + TRUNCATE dd_delta_label2; + TRUNCATE dd_new_features; + TRUNCATE dd_delta_q1; + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_11 { + sql: """ TRUNCATE dd_new_label2; + INSERT INTO dd_new_label2 + SELECT R0.wid, R0.val + FROM label2 R0 + UNION ALL + SELECT R0.wid, R0.val + FROM dd_delta_label2 R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_15 { + sql: """ + CREATE VIEW dd_new_q1 AS SELECT DISTINCT R0.wid, id, label + FROM q1 R0 + + UNION ALL SELECT DISTINCT R0.wid, id, label + FROM dd_delta_q1 R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_20" ] + } + + + deepdive.extraction.extractors.extraction_rule_7 { + sql: """ TRUNCATE dd_new_label1; + INSERT INTO dd_new_label1 + SELECT R0.wid, R0.val + FROM label1 R0 + UNION ALL + SELECT R0.wid, R0.val + FROM dd_delta_label1 R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_21 { + sql: """ + INSERT INTO dd_delta_q2 SELECT DISTINCT R0.wid, 0 as id, R0.val AS label + FROM dd_delta_label2 R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ TRUNCATE dd_new_features; + INSERT INTO dd_new_features + SELECT R0.id, R0.word_id, R0.feature_id, R0.feature_val + FROM features R0 + UNION ALL + SELECT R0.id, R0.word_id, R0.feature_id, R0.feature_val + FROM dd_delta_features R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_20 { + sql: """ + INSERT INTO dd_delta_q1 SELECT DISTINCT R0.wid, 0 as id, R0.val AS label + FROM dd_delta_label1 R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_19 { + sql: """ + CREATE VIEW dd_new_q2 AS SELECT DISTINCT R0.wid, id, label + FROM q2 
R0 + + UNION ALL SELECT DISTINCT R0.wid, id, label + FROM dd_delta_q2 R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_21" ] + } + + + deepdive.inference.factors.dd_new_q2_0 { + input_query: """ + SELECT R0.id AS "dd_new_q2.R0.id" , R1.feature_id AS "dd_delta_features.R1.feature_id" + FROM dd_new_q2 R0, dd_delta_features R1 + WHERE R1.word_id = R0.wid """ + function: "Imply(dd_new_q2.R0.label)" + weight: "?(dd_delta_features.R1.feature_id)" + } + + + deepdive.inference.factors.dd_new_q1_1 { + input_query: """ + SELECT R0.id AS "dd_new_q1.R0.id" , R1.feature_id AS "dd_delta_features.R1.feature_id" + FROM dd_new_q1 R0, dd_delta_features R1 + WHERE R1.word_id = R0.wid """ + function: "Imply(dd_new_q1.R0.label)" + weight: "?(dd_delta_features.R1.feature_id)" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_10, extraction_rule_5, extraction_rule_14, extraction_rule_6, extraction_rule_1, extraction_rule_17, extraction_rule_9, extraction_rule_2, extraction_rule_13, extraction_rule_18] +deepdive.pipeline.pipelines.extraction: [extraction_rule_21, extraction_rule_7, extraction_rule_20, extraction_rule_19, extraction_rule_3, extraction_rule_11, extraction_rule_15] +deepdive.pipeline.pipelines.inference: [dd_new_q2_0, dd_new_q1_1] +deepdive.pipeline.pipelines.cleanup: [cleanup] +deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/ocr_example/compile-materialization.expected b/test/expected-output-test/ocr_example/compile-materialization.expected new file mode 100644 index 000000000..1fc5c659c --- /dev/null +++ b/test/expected-output-test/ocr_example/compile-materialization.expected @@ -0,0 +1,128 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: MATERIALIZATION + } + + + + deepdive.schema.variables { + q1.label: Boolean +q2.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS label1 CASCADE; + CREATE TABLE + label1(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS features CASCADE; + CREATE TABLE + features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS label2 CASCADE; + CREATE TABLE + label2(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS q1 CASCADE; + CREATE TABLE + q1(wid INT, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS q2 CASCADE; + CREATE TABLE + q2(wid INT, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE label1; + TRUNCATE features; + TRUNCATE label2; + TRUNCATE q1; + TRUNCATE q2; + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ + INSERT INTO q1 SELECT DISTINCT R0.wid, 0 as id, R0.val AS label + FROM label1 R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ + INSERT INTO q2 SELECT DISTINCT R0.wid, 0 as id, R0.val AS label + 
FROM label2 R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.inference.factors.q1_0 { + input_query: """ + SELECT R0.id AS "q1.R0.id" , R1.feature_id AS "features.R1.feature_id" + FROM q1 R0, features R1 + WHERE R1.word_id = R0.wid """ + function: "Imply(q1.R0.label)" + weight: "?(features.R1.feature_id)" + } + + + deepdive.inference.factors.q2_1 { + input_query: """ + SELECT R0.id AS "q2.R0.id" , R1.feature_id AS "features.R1.feature_id" + FROM q2 R0, features R1 + WHERE R1.word_id = R0.wid """ + function: "Imply(q2.R0.label)" + weight: "?(features.R1.feature_id)" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2, extraction_rule_3, extraction_rule_4] +deepdive.pipeline.pipelines.extraction: [extraction_rule_5, extraction_rule_6] +deepdive.pipeline.pipelines.inference: [q1_0, q2_1] +deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/ocr_example/compile-merge.expected b/test/expected-output-test/ocr_example/compile-merge.expected new file mode 100644 index 000000000..d45ec8193 --- /dev/null +++ b/test/expected-output-test/ocr_example/compile-merge.expected @@ -0,0 +1,56 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: MERGE + } + + + + deepdive.schema.variables { + + } + + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ TRUNCATE label2; + INSERT INTO label2 + SELECT R0.wid, R0.val + FROM dd_new_label2 R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ TRUNCATE label1; + INSERT INTO label1 + SELECT R0.wid, R0.val + FROM dd_new_label1 R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ TRUNCATE features; + INSERT INTO features + SELECT R0.id, R0.word_id, R0.feature_id, R0.feature_val + FROM dd_new_features R0 + + """ + style: "sql_extractor" + + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.extraction: [extraction_rule_2, extraction_rule_1, extraction_rule_0] diff --git a/test/expected-output-test/ocr_example/compile.expected b/test/expected-output-test/ocr_example/compile.expected new file mode 100644 index 000000000..e41c42804 --- /dev/null +++ b/test/expected-output-test/ocr_example/compile.expected @@ -0,0 +1,127 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: ORIGINAL + } + + + + deepdive.schema.variables { + q1.label: Boolean +q2.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS label1 CASCADE; + CREATE TABLE + label1(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS features CASCADE; + CREATE TABLE + features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS label2 CASCADE; + CREATE TABLE + label2(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF 
EXISTS q1 CASCADE; + CREATE TABLE + q1(wid INT, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS q2 CASCADE; + CREATE TABLE + q2(wid INT, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE label1; + TRUNCATE features; + TRUNCATE label2; + TRUNCATE q1; + TRUNCATE q2; + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ + INSERT INTO q1 SELECT DISTINCT R0.wid, 0 as id, R0.val AS label + FROM label1 R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ + INSERT INTO q2 SELECT DISTINCT R0.wid, 0 as id, R0.val AS label + FROM label2 R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.inference.factors.q1_0 { + input_query: """ + SELECT R0.id AS "q1.R0.id" , R1.feature_id AS "features.R1.feature_id" + FROM q1 R0, features R1 + WHERE R1.word_id = R0.wid """ + function: "Imply(q1.R0.label)" + weight: "?(features.R1.feature_id)" + } + + + deepdive.inference.factors.q2_1 { + input_query: """ + SELECT R0.id AS "q2.R0.id" , R1.feature_id AS "features.R1.feature_id" + FROM q2 R0, features R1 + WHERE R1.word_id = R0.wid """ + function: "Imply(q2.R0.label)" + weight: "?(features.R1.feature_id)" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2, extraction_rule_3, extraction_rule_4] +deepdive.pipeline.pipelines.extraction: [extraction_rule_5, extraction_rule_6] +deepdive.pipeline.pipelines.inference: [q1_0, q2_1] diff --git a/test/expected-output-test/ocr_example/input.ddl b/test/expected-output-test/ocr_example/input.ddl new file mode 100644 index 000000000..dba9cec58 --- /dev/null +++ b/test/expected-output-test/ocr_example/input.ddl @@ -0,0 +1,25 @@ +features( + id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN). + +label1( + wid INT, + val BOOLEAN). + +label2( + wid INT, + val BOOLEAN). + +q1?(wid INT). +q2?(wid INT). + +q1(wid) :- label1(wid, val) label = val. +q2(wid) :- label2(wid, val) label = val. + +q1(wid) :- features(id, wid, fid, fval) +weight = fid. + +q2(wid) :- features(id, wid, fid, fval) +weight = fid. \ No newline at end of file diff --git a/test/expected-output-test/ocr_example/print-incremental.expected b/test/expected-output-test/ocr_example/print-incremental.expected new file mode 100644 index 000000000..e8ed0bbf8 --- /dev/null +++ b/test/expected-output-test/ocr_example/print-incremental.expected @@ -0,0 +1,83 @@ +features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN). + +dd_delta_features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN). + +dd_new_features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN). + +dd_new_features(id, word_id, feature_id, feature_val) :- + features(id, word_id, feature_id, feature_val); + dd_delta_features(id, word_id, feature_id, feature_val). + +label1(wid INT, + val BOOLEAN). + +dd_delta_label1(wid INT, + val BOOLEAN). + +dd_new_label1(wid INT, + val BOOLEAN). + +dd_new_label1(wid, val) :- + label1(wid, val); + dd_delta_label1(wid, val). + +label2(wid INT, + val BOOLEAN). + +dd_delta_label2(wid INT, + val BOOLEAN). + +dd_new_label2(wid INT, + val BOOLEAN). + +dd_new_label2(wid, val) :- + label2(wid, val); + dd_delta_label2(wid, val). + +q1?(wid INT). + +dd_delta_q1?(wid INT). 
+ +dd_new_q1?(wid INT). + +dd_new_q1(wid) :- + q1(wid); + dd_delta_q1(wid). + +q2?(wid INT). + +dd_delta_q2?(wid INT). + +dd_new_q2?(wid INT). + +dd_new_q2(wid) :- + q2(wid); + dd_delta_q2(wid). + +dd_delta_q1(wid) :- + dd_delta_label1(wid, val) + label = val. + +dd_delta_q2(wid) :- + dd_delta_label2(wid, val) + label = val. + +dd_new_q1(wid) :- + dd_delta_features(id, wid, fid, fval) + weight = fid + semantics = Imply. + +dd_new_q2(wid) :- + dd_delta_features(id, wid, fid, fval) + weight = fid + semantics = Imply. + diff --git a/test/expected-output-test/ocr_example/print.expected b/test/expected-output-test/ocr_example/print.expected new file mode 100644 index 000000000..da66f1d67 --- /dev/null +++ b/test/expected-output-test/ocr_example/print.expected @@ -0,0 +1,33 @@ +features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN). + +label1(wid INT, + val BOOLEAN). + +label2(wid INT, + val BOOLEAN). + +q1?(wid INT). + +q2?(wid INT). + +q1(wid) :- + label1(wid, val) + label = val. + +q2(wid) :- + label2(wid, val) + label = val. + +q1(wid) :- + features(id, wid, fid, fval) + weight = fid + semantics = Imply. + +q2(wid) :- + features(id, wid, fid, fval) + weight = fid + semantics = Imply. + From 1ff0e953dd3ac3b137bd594eb3b24d96d8750a91 Mon Sep 17 00:00:00 2001 From: feiranwang Date: Wed, 1 Jul 2015 16:02:25 -0700 Subject: [PATCH 137/347] add ocr and smoke example --- examples/ocr.ddl | 28 ++ examples/smoke.ddl | 8 +- .../ocr_example/compile-incremental.expected | 246 ++++++++++++++++++ .../compile-materialization.expected | 128 +++++++++ .../ocr_example/compile-merge.expected | 56 ++++ .../ocr_example/compile.expected | 127 +++++++++ .../ocr_example/input.ddl | 1 + .../ocr_example/print-incremental.expected | 83 ++++++ .../ocr_example/print.expected | 33 +++ .../compile-incremental.expected | 32 +-- .../compile-materialization.expected | 14 +- .../smoke_example/compile.expected | 14 +- .../smoke_example/input.ddl | 34 +-- .../smoke_example/print-incremental.expected | 10 +- .../smoke_example/print.expected | 8 +- 15 files changed, 766 insertions(+), 56 deletions(-) create mode 100644 examples/ocr.ddl create mode 100644 test/expected-output-test/ocr_example/compile-incremental.expected create mode 100644 test/expected-output-test/ocr_example/compile-materialization.expected create mode 100644 test/expected-output-test/ocr_example/compile-merge.expected create mode 100644 test/expected-output-test/ocr_example/compile.expected create mode 120000 test/expected-output-test/ocr_example/input.ddl create mode 100644 test/expected-output-test/ocr_example/print-incremental.expected create mode 100644 test/expected-output-test/ocr_example/print.expected mode change 100644 => 120000 test/expected-output-test/smoke_example/input.ddl diff --git a/examples/ocr.ddl b/examples/ocr.ddl new file mode 100644 index 000000000..0960ecaf4 --- /dev/null +++ b/examples/ocr.ddl @@ -0,0 +1,28 @@ +// ocr example from deepdive +// https://github.com/HazyResearch/deepdive/tree/master/examples/ocr + +features( + id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN). + +label1( + wid INT, + val BOOLEAN). + +label2( + wid INT, + val BOOLEAN). + +q1?(wid INT). +q2?(wid INT). + +q1(wid) :- label1(wid, val) label = val. +q2(wid) :- label2(wid, val) label = val. + +q1(wid) :- features(id, wid, fid, fval) +weight = fid. + +q2(wid) :- features(id, wid, fid, fval) +weight = fid. 
\ No newline at end of file diff --git a/examples/smoke.ddl b/examples/smoke.ddl index 7371f2885..26f28fd37 100644 --- a/examples/smoke.ddl +++ b/examples/smoke.ddl @@ -1,3 +1,6 @@ +// smoke example from deepdive +// https://github.com/HazyResearch/deepdive/tree/master/examples/smoke + person ( person_id bigint, name text @@ -30,6 +33,7 @@ smoke(pid) :- person_smokes(pid, l) label = l. cancer(pid) :- person_has_cancer(pid, l) label = l. cancer(pid) :- smoke(pid), person_smokes(pid, l) - weight = 3 - label = l. + weight = 0.5. +smoke(pid) :- smoke(pid1), friend(pid1, pid) + weight = 0.4. \ No newline at end of file diff --git a/test/expected-output-test/ocr_example/compile-incremental.expected b/test/expected-output-test/ocr_example/compile-incremental.expected new file mode 100644 index 000000000..c7da34686 --- /dev/null +++ b/test/expected-output-test/ocr_example/compile-incremental.expected @@ -0,0 +1,246 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: INCREMENTAL + } + + + deepdive.schema.keys { + dd_new_q1 : [wid] + dd_delta_q2 : [wid] + dd_delta_q1 : [wid] + dd_new_q2 : [wid] + } + + deepdive.schema.variables { + dd_delta_q1.label: Boolean +dd_new_q2.label: Boolean +dd_new_q1.label: Boolean +q2.label: Boolean +dd_delta_q2.label: Boolean +q1.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_10 { + sql: """ DROP TABLE IF EXISTS dd_new_label2 CASCADE; + CREATE TABLE + dd_new_label2(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP TABLE IF EXISTS dd_delta_label1 CASCADE; + CREATE TABLE + dd_delta_label1(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ DROP TABLE IF EXISTS dd_new_label1 CASCADE; + CREATE TABLE + dd_new_label1(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS dd_delta_features CASCADE; + CREATE TABLE + dd_delta_features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_17 { + sql: """ DROP TABLE IF EXISTS dd_delta_q2 CASCADE; + CREATE TABLE + dd_delta_q2(wid INT, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_9 { + sql: """ DROP TABLE IF EXISTS dd_delta_label2 CASCADE; + CREATE TABLE + dd_delta_label2(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS dd_new_features CASCADE; + CREATE TABLE + dd_new_features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP TABLE IF EXISTS dd_delta_q1 CASCADE; + CREATE TABLE + dd_delta_q1(wid INT, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE dd_new_label2; + TRUNCATE dd_delta_label1; + TRUNCATE dd_new_label1; + TRUNCATE dd_delta_features; + TRUNCATE dd_delta_q2; + TRUNCATE dd_delta_label2; + TRUNCATE dd_new_features; + TRUNCATE dd_delta_q1; + """ + style: "sql_extractor" + } + + 
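+  # A hedged note on the rules below (relation and column names as in this
+  # file): variable relations such as q1 and q2 are merged as views rather
+  # than materialized tables, along the lines of
+  #
+  #   CREATE VIEW dd_new_q1 AS
+  #   SELECT DISTINCT wid, id, label FROM q1
+  #   UNION ALL
+  #   SELECT DISTINCT wid, id, label FROM dd_delta_q1;
+  #
+  # and in the factor blocks at the end of this file, a weight written as
+  # "?(dd_delta_features.R1.feature_id)" is learned per distinct feature_id,
+  # unlike the fixed weights (0.5, 0.4) of the smoke example.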
deepdive.extraction.extractors.extraction_rule_11 { + sql: """ TRUNCATE dd_new_label2; + INSERT INTO dd_new_label2 + SELECT R0.wid, R0.val + FROM label2 R0 + UNION ALL + SELECT R0.wid, R0.val + FROM dd_delta_label2 R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_15 { + sql: """ + CREATE VIEW dd_new_q1 AS SELECT DISTINCT R0.wid, id, label + FROM q1 R0 + + UNION ALL SELECT DISTINCT R0.wid, id, label + FROM dd_delta_q1 R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_20" ] + } + + + deepdive.extraction.extractors.extraction_rule_7 { + sql: """ TRUNCATE dd_new_label1; + INSERT INTO dd_new_label1 + SELECT R0.wid, R0.val + FROM label1 R0 + UNION ALL + SELECT R0.wid, R0.val + FROM dd_delta_label1 R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_21 { + sql: """ + INSERT INTO dd_delta_q2 SELECT DISTINCT R0.wid, 0 as id, R0.val AS label + FROM dd_delta_label2 R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ TRUNCATE dd_new_features; + INSERT INTO dd_new_features + SELECT R0.id, R0.word_id, R0.feature_id, R0.feature_val + FROM features R0 + UNION ALL + SELECT R0.id, R0.word_id, R0.feature_id, R0.feature_val + FROM dd_delta_features R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_20 { + sql: """ + INSERT INTO dd_delta_q1 SELECT DISTINCT R0.wid, 0 as id, R0.val AS label + FROM dd_delta_label1 R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_19 { + sql: """ + CREATE VIEW dd_new_q2 AS SELECT DISTINCT R0.wid, id, label + FROM q2 R0 + + UNION ALL SELECT DISTINCT R0.wid, id, label + FROM dd_delta_q2 R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_21" ] + } + + + deepdive.inference.factors.dd_new_q2_0 { + input_query: """ + SELECT R0.id AS "dd_new_q2.R0.id" , R1.feature_id AS "dd_delta_features.R1.feature_id" + FROM dd_new_q2 R0, dd_delta_features R1 + WHERE R1.word_id = R0.wid """ + function: "Imply(dd_new_q2.R0.label)" + weight: "?(dd_delta_features.R1.feature_id)" + } + + + deepdive.inference.factors.dd_new_q1_1 { + input_query: """ + SELECT R0.id AS "dd_new_q1.R0.id" , R1.feature_id AS "dd_delta_features.R1.feature_id" + FROM dd_new_q1 R0, dd_delta_features R1 + WHERE R1.word_id = R0.wid """ + function: "Imply(dd_new_q1.R0.label)" + weight: "?(dd_delta_features.R1.feature_id)" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_10, extraction_rule_5, extraction_rule_14, extraction_rule_6, extraction_rule_1, extraction_rule_17, extraction_rule_9, extraction_rule_2, extraction_rule_13, extraction_rule_18] +deepdive.pipeline.pipelines.extraction: [extraction_rule_21, extraction_rule_7, extraction_rule_20, extraction_rule_19, extraction_rule_3, extraction_rule_11, extraction_rule_15] +deepdive.pipeline.pipelines.inference: [dd_new_q2_0, dd_new_q1_1] +deepdive.pipeline.pipelines.cleanup: [cleanup] +deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/ocr_example/compile-materialization.expected b/test/expected-output-test/ocr_example/compile-materialization.expected new file mode 100644 index 000000000..1fc5c659c --- /dev/null +++ b/test/expected-output-test/ocr_example/compile-materialization.expected @@ -0,0 +1,128 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: 
${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: MATERIALIZATION + } + + + + deepdive.schema.variables { + q1.label: Boolean +q2.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS label1 CASCADE; + CREATE TABLE + label1(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS features CASCADE; + CREATE TABLE + features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS label2 CASCADE; + CREATE TABLE + label2(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS q1 CASCADE; + CREATE TABLE + q1(wid INT, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS q2 CASCADE; + CREATE TABLE + q2(wid INT, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE label1; + TRUNCATE features; + TRUNCATE label2; + TRUNCATE q1; + TRUNCATE q2; + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ + INSERT INTO q1 SELECT DISTINCT R0.wid, 0 as id, R0.val AS label + FROM label1 R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ + INSERT INTO q2 SELECT DISTINCT R0.wid, 0 as id, R0.val AS label + FROM label2 R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.inference.factors.q1_0 { + input_query: """ + SELECT R0.id AS "q1.R0.id" , R1.feature_id AS "features.R1.feature_id" + FROM q1 R0, features R1 + WHERE R1.word_id = R0.wid """ + function: "Imply(q1.R0.label)" + weight: "?(features.R1.feature_id)" + } + + + deepdive.inference.factors.q2_1 { + input_query: """ + SELECT R0.id AS "q2.R0.id" , R1.feature_id AS "features.R1.feature_id" + FROM q2 R0, features R1 + WHERE R1.word_id = R0.wid """ + function: "Imply(q2.R0.label)" + weight: "?(features.R1.feature_id)" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2, extraction_rule_3, extraction_rule_4] +deepdive.pipeline.pipelines.extraction: [extraction_rule_5, extraction_rule_6] +deepdive.pipeline.pipelines.inference: [q1_0, q2_1] +deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/ocr_example/compile-merge.expected b/test/expected-output-test/ocr_example/compile-merge.expected new file mode 100644 index 000000000..d45ec8193 --- /dev/null +++ b/test/expected-output-test/ocr_example/compile-merge.expected @@ -0,0 +1,56 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: MERGE + } + + + + deepdive.schema.variables { + + } + + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ TRUNCATE label2; + INSERT INTO label2 + SELECT R0.wid, R0.val + FROM dd_new_label2 R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ TRUNCATE label1; + INSERT INTO label1 + SELECT R0.wid, R0.val + FROM 
dd_new_label1 R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ TRUNCATE features; + INSERT INTO features + SELECT R0.id, R0.word_id, R0.feature_id, R0.feature_val + FROM dd_new_features R0 + + """ + style: "sql_extractor" + + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.extraction: [extraction_rule_2, extraction_rule_1, extraction_rule_0] diff --git a/test/expected-output-test/ocr_example/compile.expected b/test/expected-output-test/ocr_example/compile.expected new file mode 100644 index 000000000..e41c42804 --- /dev/null +++ b/test/expected-output-test/ocr_example/compile.expected @@ -0,0 +1,127 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: ORIGINAL + } + + + + deepdive.schema.variables { + q1.label: Boolean +q2.label: Boolean + } + + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS label1 CASCADE; + CREATE TABLE + label1(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS features CASCADE; + CREATE TABLE + features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS label2 CASCADE; + CREATE TABLE + label2(wid INT, + val BOOLEAN) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS q1 CASCADE; + CREATE TABLE + q1(wid INT, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP TABLE IF EXISTS q2 CASCADE; + CREATE TABLE + q2(wid INT, + id bigint, + label boolean) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE label1; + TRUNCATE features; + TRUNCATE label2; + TRUNCATE q1; + TRUNCATE q2; + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ + INSERT INTO q1 SELECT DISTINCT R0.wid, 0 as id, R0.val AS label + FROM label1 R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ + INSERT INTO q2 SELECT DISTINCT R0.wid, 0 as id, R0.val AS label + FROM label2 R0 + + + """ + style: "sql_extractor" + + } + + + deepdive.inference.factors.q1_0 { + input_query: """ + SELECT R0.id AS "q1.R0.id" , R1.feature_id AS "features.R1.feature_id" + FROM q1 R0, features R1 + WHERE R1.word_id = R0.wid """ + function: "Imply(q1.R0.label)" + weight: "?(features.R1.feature_id)" + } + + + deepdive.inference.factors.q2_1 { + input_query: """ + SELECT R0.id AS "q2.R0.id" , R1.feature_id AS "features.R1.feature_id" + FROM q2 R0, features R1 + WHERE R1.word_id = R0.wid """ + function: "Imply(q2.R0.label)" + weight: "?(features.R1.feature_id)" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2, extraction_rule_3, extraction_rule_4] +deepdive.pipeline.pipelines.extraction: [extraction_rule_5, extraction_rule_6] +deepdive.pipeline.pipelines.inference: [q1_0, q2_1] diff --git a/test/expected-output-test/ocr_example/input.ddl b/test/expected-output-test/ocr_example/input.ddl new file mode 120000 index 
000000000..69125ec70 --- /dev/null +++ b/test/expected-output-test/ocr_example/input.ddl @@ -0,0 +1 @@ +../../../examples/ocr.ddl \ No newline at end of file diff --git a/test/expected-output-test/ocr_example/print-incremental.expected b/test/expected-output-test/ocr_example/print-incremental.expected new file mode 100644 index 000000000..e8ed0bbf8 --- /dev/null +++ b/test/expected-output-test/ocr_example/print-incremental.expected @@ -0,0 +1,83 @@ +features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN). + +dd_delta_features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN). + +dd_new_features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN). + +dd_new_features(id, word_id, feature_id, feature_val) :- + features(id, word_id, feature_id, feature_val); + dd_delta_features(id, word_id, feature_id, feature_val). + +label1(wid INT, + val BOOLEAN). + +dd_delta_label1(wid INT, + val BOOLEAN). + +dd_new_label1(wid INT, + val BOOLEAN). + +dd_new_label1(wid, val) :- + label1(wid, val); + dd_delta_label1(wid, val). + +label2(wid INT, + val BOOLEAN). + +dd_delta_label2(wid INT, + val BOOLEAN). + +dd_new_label2(wid INT, + val BOOLEAN). + +dd_new_label2(wid, val) :- + label2(wid, val); + dd_delta_label2(wid, val). + +q1?(wid INT). + +dd_delta_q1?(wid INT). + +dd_new_q1?(wid INT). + +dd_new_q1(wid) :- + q1(wid); + dd_delta_q1(wid). + +q2?(wid INT). + +dd_delta_q2?(wid INT). + +dd_new_q2?(wid INT). + +dd_new_q2(wid) :- + q2(wid); + dd_delta_q2(wid). + +dd_delta_q1(wid) :- + dd_delta_label1(wid, val) + label = val. + +dd_delta_q2(wid) :- + dd_delta_label2(wid, val) + label = val. + +dd_new_q1(wid) :- + dd_delta_features(id, wid, fid, fval) + weight = fid + semantics = Imply. + +dd_new_q2(wid) :- + dd_delta_features(id, wid, fid, fval) + weight = fid + semantics = Imply. + diff --git a/test/expected-output-test/ocr_example/print.expected b/test/expected-output-test/ocr_example/print.expected new file mode 100644 index 000000000..da66f1d67 --- /dev/null +++ b/test/expected-output-test/ocr_example/print.expected @@ -0,0 +1,33 @@ +features(id BIGSERIAL, + word_id INT, + feature_id INT, + feature_val BOOLEAN). + +label1(wid INT, + val BOOLEAN). + +label2(wid INT, + val BOOLEAN). + +q1?(wid INT). + +q2?(wid INT). + +q1(wid) :- + label1(wid, val) + label = val. + +q2(wid) :- + label2(wid, val) + label = val. + +q1(wid) :- + features(id, wid, fid, fval) + weight = fid + semantics = Imply. + +q2(wid) :- + features(id, wid, fid, fval) + weight = fid + semantics = Imply. 
+ diff --git a/test/expected-output-test/smoke_example/compile-incremental.expected b/test/expected-output-test/smoke_example/compile-incremental.expected index b547dc8df..042e044f4 100644 --- a/test/expected-output-test/smoke_example/compile-incremental.expected +++ b/test/expected-output-test/smoke_example/compile-incremental.expected @@ -83,16 +83,6 @@ dd_delta_smoke.label: Boolean style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_18 { - sql: """ DROP TABLE IF EXISTS dd_new_smoke CASCADE; - CREATE TABLE - dd_new_smoke(person_id bigint, - id bigint, - label boolean) - """ - style: "sql_extractor" - } - deepdive.extraction.extractors.extraction_rule_21 { sql: """ DROP TABLE IF EXISTS dd_delta_cancer CASCADE; CREATE TABLE @@ -138,7 +128,6 @@ dd_delta_smoke.label: Boolean TRUNCATE dd_delta_person; TRUNCATE dd_new_person; TRUNCATE dd_delta_person_has_cancer; - TRUNCATE dd_new_smoke; TRUNCATE dd_delta_cancer; TRUNCATE dd_delta_person_smokes; TRUNCATE dd_new_person_smokes; @@ -190,8 +179,8 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_19 { - sql: """ TRUNCATE dd_new_smoke; - INSERT INTO dd_new_smoke SELECT DISTINCT R0.person_id, id, label + sql: """ + CREATE VIEW dd_new_smoke AS SELECT DISTINCT R0.person_id, id, label FROM smoke R0 UNION ALL SELECT DISTINCT R0.person_id, id, label @@ -270,12 +259,25 @@ dd_delta_smoke.label: Boolean FROM dd_new_cancer R0, dd_new_smoke R1, dd_delta_person_smokes R2 WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ function: "Imply(dd_delta_smoke.R1.label, dd_new_cancer.R0.label)" - weight: "3.0" + weight: "0.5" + } + + + deepdive.inference.factors.dd_new_smoke_1 { + input_query: """ + SELECT R0.id AS "dd_new_smoke.R0.id" , R1.id AS "dd_delta_smoke.R1.id" + FROM dd_new_smoke R0, dd_delta_smoke R1, friend R2 + WHERE R2.pid1 = R1.person_id AND R2.pid = R0.person_id UNION ALL + SELECT R0.id AS "dd_new_smoke.R0.id" , R1.id AS "dd_new_smoke.R1.id" + FROM dd_new_smoke R0, dd_new_smoke R1, dd_delta_friend R2 + WHERE R2.pid1 = R1.person_id AND R2.pid = R0.person_id """ + function: "Imply(dd_delta_smoke.R1.label, dd_new_smoke.R0.label)" + weight: "0.4" } deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_13, extraction_rule_17, extraction_rule_6, extraction_rule_1, extraction_rule_2, extraction_rule_5, extraction_rule_18, extraction_rule_21, extraction_rule_22, extraction_rule_9, extraction_rule_10, extraction_rule_14] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_25, extraction_rule_19, extraction_rule_3, extraction_rule_23, extraction_rule_11, extraction_rule_24, extraction_rule_15] -deepdive.pipeline.pipelines.inference: [dd_new_cancer_0] +deepdive.pipeline.pipelines.inference: [dd_new_cancer_0, dd_new_smoke_1] deepdive.pipeline.pipelines.cleanup: [cleanup] deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/smoke_example/compile-materialization.expected b/test/expected-output-test/smoke_example/compile-materialization.expected index 6ed2190ad..295dfd0a7 100644 --- a/test/expected-output-test/smoke_example/compile-materialization.expected +++ b/test/expected-output-test/smoke_example/compile-materialization.expected @@ -116,11 +116,21 @@ cancer.label: Boolean FROM cancer R0, smoke R1, person_smokes R2 WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ function: "Imply(smoke.R1.label, cancer.R0.label)" - weight: "3.0" + weight: "0.5" + } + + + deepdive.inference.factors.smoke_1 { + 
input_query: """ + SELECT R0.id AS "smoke.R0.id" , R1.id AS "smoke.R1.id" + FROM smoke R0, smoke R1, friend R2 + WHERE R2.pid1 = R1.person_id AND R2.pid = R0.person_id """ + function: "Imply(smoke.R1.label, smoke.R0.label)" + weight: "0.4" } deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_2, extraction_rule_4, extraction_rule_5, extraction_rule_3, extraction_rule_1, extraction_rule_0] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_6] -deepdive.pipeline.pipelines.inference: [cancer_0] +deepdive.pipeline.pipelines.inference: [cancer_0, smoke_1] deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/smoke_example/compile.expected b/test/expected-output-test/smoke_example/compile.expected index c5147d90d..60b6e3112 100644 --- a/test/expected-output-test/smoke_example/compile.expected +++ b/test/expected-output-test/smoke_example/compile.expected @@ -116,10 +116,20 @@ cancer.label: Boolean FROM cancer R0, smoke R1, person_smokes R2 WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ function: "Imply(smoke.R1.label, cancer.R0.label)" - weight: "3.0" + weight: "0.5" + } + + + deepdive.inference.factors.smoke_1 { + input_query: """ + SELECT R0.id AS "smoke.R0.id" , R1.id AS "smoke.R1.id" + FROM smoke R0, smoke R1, friend R2 + WHERE R2.pid1 = R1.person_id AND R2.pid = R0.person_id """ + function: "Imply(smoke.R1.label, smoke.R0.label)" + weight: "0.4" } deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_2, extraction_rule_4, extraction_rule_5, extraction_rule_3, extraction_rule_1, extraction_rule_0] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_6] -deepdive.pipeline.pipelines.inference: [cancer_0] +deepdive.pipeline.pipelines.inference: [cancer_0, smoke_1] diff --git a/test/expected-output-test/smoke_example/input.ddl b/test/expected-output-test/smoke_example/input.ddl deleted file mode 100644 index 10da92bed..000000000 --- a/test/expected-output-test/smoke_example/input.ddl +++ /dev/null @@ -1,33 +0,0 @@ -person ( - person_id bigint, - name text -). - -person_has_cancer ( - person_id bigint, - has_cancer boolean -). - -person_smokes ( - person_id bigint, - smokes boolean -). - -friends ( - person_id bigint, - friend_id bigint -). - -smoke? ( - person_id bigint -). - -cancer? ( - person_id bigint -). - -smoke(pid) :- person_smokes(pid, l) label = l. -cancer(pid) :- person_has_cancer(pid, l) label = l. - -cancer(pid) :- smoke(pid), person_smokes(pid, l) - weight = 3.0. diff --git a/test/expected-output-test/smoke_example/input.ddl b/test/expected-output-test/smoke_example/input.ddl new file mode 120000 index 000000000..5c2cf2d32 --- /dev/null +++ b/test/expected-output-test/smoke_example/input.ddl @@ -0,0 +1 @@ +../../../examples/smoke.ddl \ No newline at end of file diff --git a/test/expected-output-test/smoke_example/print-incremental.expected b/test/expected-output-test/smoke_example/print-incremental.expected index 5046bbaf3..7a52fb60d 100644 --- a/test/expected-output-test/smoke_example/print-incremental.expected +++ b/test/expected-output-test/smoke_example/print-incremental.expected @@ -83,6 +83,14 @@ dd_new_cancer(pid) :- person_smokes(pid, l); dd_new_smoke(pid), dd_delta_person_smokes(pid, l) - weight = 3.0 + weight = 0.5 + semantics = Imply. + +dd_new_smoke(pid) :- + dd_delta_smoke(pid1), + friend(pid1, pid); + dd_new_smoke(pid1), + dd_delta_friend(pid1, pid) + weight = 0.4 semantics = Imply. 
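
Editorial aside: the dd_new_smoke rule in the print-incremental output above follows the fixed delta-rule expansion these patches implement — for a rule body of n atoms, the i-th disjunct reads dd_delta_ of atom i, dd_new_ of every atom before it, and the original relation for the rest, so each new derivation is produced exactly once. A minimal standalone sketch of that expansion (illustrative only; object and method names here are invented for this note, and the project's actual logic lives in DeepDiveLogDeltaDeriver.scala):

object DeltaExpansionSketch {
  // For a body [r0, r1, ..., r(n-1)], disjunct i uses dd_new_ for atoms
  // before position i, dd_delta_ at position i, and the original relation
  // name for the remaining atoms.
  def expand(body: List[String]): List[List[String]] =
    body.indices.toList map { i =>
      body.zipWithIndex map {
        case (r, j) if j < i  => "dd_new_" + r
        case (r, j) if j == i => "dd_delta_" + r
        case (r, _)           => r
      }
    }

  def main(args: Array[String]): Unit =
    // smoke(pid) :- smoke(pid1), friend(pid1, pid) yields the two disjuncts
    // shown in print-incremental.expected above:
    //   List(dd_delta_smoke, friend)
    //   List(dd_new_smoke, dd_delta_friend)
    expand(List("smoke", "friend")) foreach println
}
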
diff --git a/test/expected-output-test/smoke_example/print.expected b/test/expected-output-test/smoke_example/print.expected index 48f4d4b4f..bef8589bc 100644 --- a/test/expected-output-test/smoke_example/print.expected +++ b/test/expected-output-test/smoke_example/print.expected @@ -25,6 +25,12 @@ cancer(pid) :- cancer(pid) :- smoke(pid), person_smokes(pid, l) - weight = 3.0 + weight = 0.5 + semantics = Imply. + +smoke(pid) :- + smoke(pid1), + friend(pid1, pid) + weight = 0.4 semantics = Imply. From 7c04a0a1dc411ebf8c43ff84f3f9df5508277549 Mon Sep 17 00:00:00 2001 From: feiranwang Date: Wed, 1 Jul 2015 19:36:41 -0700 Subject: [PATCH 138/347] add support for multinomial --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 30 +++++++++++++++---- .../deepdive/ddlog/DeepDiveLogParser.scala | 28 ++++++++++++++--- 2 files changed, 49 insertions(+), 9 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 289c1bfbe..88575c0f6 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -86,6 +86,9 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C var ground_relations : Map[ String, Boolean ] = new HashMap[ String, Boolean ]() + // map relation name -> variable type + var variableType : Map[String, VariableType] = new HashMap[String, VariableType]() + var function_schema : Map[String, FunctionDeclaration] = new HashMap[ String, FunctionDeclaration]() // The dependency graph between statements. @@ -109,11 +112,12 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // generate the statements. mode = config.mode statements.foreach { - case SchemaDeclaration(Attribute(r, terms, types), isQuery) => { + case SchemaDeclaration(Attribute(r, terms, types), isQuery, vType) => { terms.foreach { case Variable(n,r,i) => schema += { (r,i) -> n } ground_relations += { r -> !isQuery } // record whether a query or a ground term. + if (isQuery) variableType += { r -> vType.get } } if (isQuery) variableTableNames += r } @@ -312,7 +316,13 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var columnDecls = stmt.a.terms map { case Variable(name, _, i) => s"${name} ${stmt.a.types(i)}" } - if (stmt.isQuery) columnDecls = columnDecls :+ "id bigint" :+ "label boolean" + if (stmt.isQuery) { + val labelColumn = stmt.variableType match { + case Some(BooleanType) => "label boolean" + case Some(MultinomialType(_)) => "label int" + } + columnDecls = columnDecls :+ "id bigint" :+ labelColumn + } val indentation = " " * stmt.a.name.length val blockName = ss.resolveExtractorBlockName(stmt) schemas += s""" @@ -493,7 +503,11 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { else "" }).filter(_ != "") val firstFunc = funcBody(0) - func = s"""${stmt.semantics}(${(funcBody.tail :+ firstFunc).mkString(", ")})""" + val function = ss.variableType get stmt.q.head.name match { + case Some(BooleanType) => stmt.semantics + case Some(MultinomialType(_)) => "Multinomial" + } + func = s"""${function}(${(funcBody.tail :+ firstFunc).mkString(", ")})""" } // weight if (weight.length == 0) @@ -581,8 +595,14 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var schema = Set[String]() // generate the statements. 
statements.foreach { - case SchemaDeclaration(a, isQuery) => - if (isQuery) schema += s"${a.name}.label: Boolean" + case SchemaDeclaration(a, isQuery, variableType) => + if (isQuery) { + val variableTypeDecl = variableType match { + case Some(BooleanType) => "Boolean" + case Some(MultinomialType(x)) => s"Categorical(${x})" + } + schema += s"${a.name}.label: ${variableTypeDecl}" + } case _ => () } val ddSchema = s""" diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index 10cc1caac..65ce612d7 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -17,6 +17,17 @@ case class Attribute(name : String, terms : List[Variable], types : List[String] case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]]) case class Column(name : String, t : String) +// variable type +sealed trait VariableType { + def cardinality: Long +} +case object BooleanType extends VariableType { + def cardinality = 2 +} +case class MultinomialType(numCategories: Int) extends VariableType { + def cardinality = numCategories +} + sealed trait FactorWeight { def variables : List[String] } @@ -35,7 +46,7 @@ case class RowWiseLineHandler(format: String, command: String) extends FunctionI // Statements that will be parsed and compiled trait Statement -case class SchemaDeclaration( a : Attribute , isQuery : Boolean ) extends Statement // atom and whether this is a query relation. +case class SchemaDeclaration( a : Attribute , isQuery : Boolean, variableType : Option[VariableType] = None) extends Statement // atom and whether this is a query relation. case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration], mode: String = null) extends Statement case class ExtractionRule(q : ConjunctiveQuery, supervision: String = null) extends Statement // Extraction rule case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule @@ -73,12 +84,21 @@ class DeepDiveLogParser extends JavaTokenParsers { columnName ~ columnType ^^ { case(name ~ ty) => Column(name, ty) } + + def CategoricalParser = "Categorical" ~> "(" ~> """\d+""".r <~ ")" ^^ { n => MultinomialType(n.toInt) } + def BooleanParser = "Boolean" ^^ { s => BooleanType } + def dataType = CategoricalParser | BooleanParser + def schemaDeclaration: Parser[SchemaDeclaration] = - relationName ~ opt("?") ~ "(" ~ rep1sep(columnDeclaration, ",") ~ ")" ^^ { - case (r ~ isQuery ~ "(" ~ attrs ~ ")") => { + relationName ~ opt("?") ~ "(" ~ rep1sep(columnDeclaration, ",") ~ ")" ~ opt(dataType) ^^ { + case (r ~ isQuery ~ "(" ~ attrs ~ ")" ~ vType) => { val vars = attrs.zipWithIndex map { case(x, i) => Variable(x.name, r, i) } var types = attrs map { case(x) => x.t } - SchemaDeclaration(Attribute(r, vars, types), (isQuery != None)) + val variableType = vType match { + case None => if (isQuery != None) Some(BooleanType) else None + case Some(s) => Some(s) + } + SchemaDeclaration(Attribute(r, vars, types), (isQuery != None), variableType) } } From 0fe09eb9810c20f4441cb144a251d3eac878e4f5 Mon Sep 17 00:00:00 2001 From: feiranwang Date: Wed, 1 Jul 2015 19:59:07 -0700 Subject: [PATCH 139/347] add chunking example --- examples/chunking.ddl | 43 ++++++ .../chunking_example/compile.expected | 128 ++++++++++++++++++ .../chunking_example/input.ddl | 1 + 3 files changed, 172 insertions(+) 
create mode 100644 examples/chunking.ddl create mode 100644 test/expected-output-test/chunking_example/compile.expected create mode 120000 test/expected-output-test/chunking_example/input.ddl diff --git a/examples/chunking.ddl b/examples/chunking.ddl new file mode 100644 index 000000000..1ef26623d --- /dev/null +++ b/examples/chunking.ddl @@ -0,0 +1,43 @@ +// chunking example +// https://github.com/HazyResearch/deepdive/tree/master/examples/chunking + +words_raw( + word_id bigserial, + word text, + pos text, + tag text). + +words( + sent_id bigint, + word_id bigint, + word text, + pos text, + true_tag text). + +word_features( + word_id bigint, + feature text). + +tag?(word_id bigint) Categorical(13). + +function ext_training + over like words_raw + returns like words + implementation "/udf/ext_training.py" handles tsv lines. + +words :- !ext_training(words_raw). + +ext_features_input(word_id1, word1, pos1, word2, pos2) :- + words(sent_id, word_id1, word1, pos1, tag1), + words(sent_id, word_id2, word2, pos2, tag2). + +function ext_features + over like ext_features_input + returns like word_features + implementation "/udf/ext_features.py" handles tsv lines. + +word_features :- !ext_features(ext_features_input). + +tag(word_id) :- words(word_id, a, b, c, tag) label = tag. + +tag(word_id) :- word_features(word_id, f) weight = f. diff --git a/test/expected-output-test/chunking_example/compile.expected b/test/expected-output-test/chunking_example/compile.expected new file mode 100644 index 000000000..1288f1873 --- /dev/null +++ b/test/expected-output-test/chunking_example/compile.expected @@ -0,0 +1,128 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: ORIGINAL + } + + + + deepdive.schema.variables { + tag.label: Categorical(13) + } + + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS words_raw CASCADE; + CREATE TABLE + words_raw(word_id bigserial, + word text, + pos text, + tag text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS words CASCADE; + CREATE TABLE + words(sent_id bigint, + word_id bigint, + word text, + pos text, + true_tag text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP TABLE IF EXISTS tag CASCADE; + CREATE TABLE + tag(word_id bigint, + id bigint, + label int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS word_features CASCADE; + CREATE TABLE + word_features(word_id bigint, + feature text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE words_raw; + TRUNCATE words; + TRUNCATE tag; + TRUNCATE word_features; + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ DROP VIEW IF EXISTS ext_features_input; + CREATE VIEW ext_features_input AS + SELECT R0.word_id AS "words.R0.word_id" , R0.word AS "words.R0.word" , R0.pos AS "words.R0.pos" , R1.word AS "words.R1.word" , R1.pos AS "words.R1.pos" + FROM words R0, words R1 + WHERE R1.sent_id = R0.sent_id + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_5" ] + } + + + deepdive.extraction.extractors.extraction_rule_9 { + sql: """ + INSERT INTO tag SELECT DISTINCT R0.sent_id, 0 as id, 
R0.true_tag AS label + FROM words R0 + + + """ + style: "sql_extractor" + dependencies: [ "extraction_rule_5" ] + } + + + deepdive.extraction.extractors.extraction_rule_8 { + input: """ SELECT * FROM ext_features_input + """ + output_relation: "word_features" + udf: ${APP_HOME}"/udf/ext_features.py" + style: "tsv_extractor" + dependencies: [ "extraction_rule_6" ] + } + + + deepdive.extraction.extractors.extraction_rule_5 { + input: """ SELECT * FROM words_raw + """ + output_relation: "words" + udf: ${APP_HOME}"/udf/ext_training.py" + style: "tsv_extractor" + + } + + + deepdive.inference.factors.tag_0 { + input_query: """ + SELECT R0.id AS "tag.R0.id" , R1.feature AS "word_features.R1.feature" + FROM tag R0, word_features R1 + WHERE R1.word_id = R0.word_id """ + function: "Multinomial(tag.R0.label)" + weight: "?(word_features.R1.feature)" + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_0, extraction_rule_1, extraction_rule_3, extraction_rule_2] +deepdive.pipeline.pipelines.extraction: [extraction_rule_6, extraction_rule_9, extraction_rule_8, extraction_rule_5] +deepdive.pipeline.pipelines.inference: [tag_0] diff --git a/test/expected-output-test/chunking_example/input.ddl b/test/expected-output-test/chunking_example/input.ddl new file mode 120000 index 000000000..64139b0ef --- /dev/null +++ b/test/expected-output-test/chunking_example/input.ddl @@ -0,0 +1 @@ +../../../examples/chunking.ddl \ No newline at end of file From b495f8dc9fa9a6abea75b7f97b6aee8c5c9efd52 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Fri, 3 Jul 2015 22:01:26 -0700 Subject: [PATCH 140/347] initial support for constants, conditions --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 35 +++++++++++-- .../ddlog/DeepDiveLogDeltaDeriver.scala | 24 ++++++--- .../ddlog/DeepDiveLogMergeDeriver.scala | 2 +- .../deepdive/ddlog/DeepDiveLogParser.scala | 51 +++++++++++++++---- .../ddlog/DeepDiveLogPrettyPrinter.scala | 10 +++- .../conditions/compile.expected | 0 .../expected-output-test/conditions/input.ddl | 5 ++ 7 files changed, 105 insertions(+), 22 deletions(-) create mode 100644 test/expected-output-test/conditions/compile.expected create mode 100644 test/expected-output-test/conditions/input.ddl diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 88575c0f6..3ea56435c 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -194,8 +194,8 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // and stick it in a map. 
val qs = new QuerySchema(z) - val whereClause = z.bodies(0).zipWithIndex flatMap { - case (Atom(relName, terms),body_index) => + var whereClause = z.bodies(0).zipWithIndex flatMap { + case (Atom(relName, terms),body_index) => { terms flatMap { case Variable(varName, relName, index) => val canonical_body_index = qs.getBodyIndex(varName) @@ -205,9 +205,30 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C val real_attr_name2 = resolveName( qs.getVar(varName)) Some(s"R${ body_index }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") } else { None } + // a constant in body means a equality condition + case Constant(value, r, i) => { + val attr = schema(relName, i) + Some(s"R${body_index}.${attr} = ${value}") + } } + } + } + println(whereClause) + + // resolve conditions + val conditions = z.conditions(0) flatMap { case Condition(lhs, op, rhs, isRhsValue) => + val resolvedLhs = resolveColumn(lhs, qs, z, OriginalOnly) + val resolvedRhs = isRhsValue match { + case true => Some(rhs) + case false => resolveColumn(rhs, qs, z, OriginalOnly) + } + Some(s"${resolvedLhs.get} ${op} ${resolvedRhs.get}") } - val whereClauseStr = whereClause match { + println(conditions) + + whereClause = whereClause ++ conditions + + var whereClauseStr = whereClause match { case Nil => "" case _ => s"""WHERE ${whereClause.mkString(" AND ")}""" } @@ -290,6 +311,7 @@ class QuerySchema(q : ConjunctiveQuery) { case Variable(v, r, i) => if( ! (query_schema contains v) ) query_schema += { v -> (index, Variable(v,r,i) ) } + case _ => } } } @@ -354,13 +376,14 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var inputQueries = new ListBuffer[String]() for (stmt <- stmts) { for (cqBody <- stmt.q.bodies) { - val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody)) + val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody), stmt.q.conditions) // Generate the body of the query. val qs = new QuerySchema( tmpCq ) if (stmt.supervision != null) { if (stmt.q.bodies.length > 1) ss.error(s"Scoping rule does not allow disjunction.\n") val headTerms = tmpCq.head.terms map { case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" + case Constant(v,r,i) => Some(v) } val index = qs.getBodyIndex(stmt.supervision) val name = ss.resolveName(qs.getVar(stmt.supervision)) @@ -372,6 +395,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } else if ((ss.schemaDeclarationGroupByHead contains stmt.q.head.name) && (ss.schemaDeclarationGroupByHead(stmt.q.head.name)(0).isQuery) && (stmt.q.head.name startsWith "dd_new_")) { val headTerms = tmpCq.head.terms map { case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" + case Constant(v,r,i) => Some(v) } val headTermsStr = ( headTerms :+ "id" ).mkString(", ") inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, label @@ -390,6 +414,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } val variableCols = tmpCq.head.terms flatMap { case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, tmpCq, resolveColumnFlag) + case Constant(v,rr,i) => Some(v) } val selectStr = variableCols.mkString(", ") @@ -471,7 +496,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { for (cqBody <- stmt.q.bodies) { // edge query val fakeBody = stmt.q.head +: cqBody - val fakeCQ = ConjunctiveQuery(stmt.q.head, List(fakeBody)) // we will just use the fakeBody below. + val fakeCQ = ConjunctiveQuery(stmt.q.head, List(fakeBody), stmt.q.conditions) // we will just use the fakeBody below. 
val index = cqBody.length + 1 val qs2 = new QuerySchema( fakeCQ ) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index 16564f6fc..874582076 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -23,12 +23,18 @@ object DeepDiveLogDeltaDeriver{ val incCqHead = if (isInference) { cq.head.copy( name = newPrefix + cq.head.name, - terms = cq.head.terms map {term => term.copy(relName = newPrefix + term.relName)} + terms = cq.head.terms map { + case term: Variable => term.copy(relName = newPrefix + term.relName) + case term: Constant => term + } ) } else { cq.head.copy( name = deltaPrefix + cq.head.name, - terms = cq.head.terms map {term => term.copy(relName = deltaPrefix + term.relName)} + terms = cq.head.terms map { + case term: Variable => term.copy(relName = deltaPrefix + term.relName) + case term: Constant => term + } ) } @@ -39,14 +45,20 @@ object DeepDiveLogDeltaDeriver{ val incDeltaBody = body map { a => a.copy( name = deltaPrefix + a.name, - terms = a.terms map {term => term.copy(relName = deltaPrefix + term.relName)} + terms = a.terms map { + case term: Variable => term.copy(relName = deltaPrefix + term.relName) + case term: Constant => term + } ) } // New body val incNewBody = body map { a => a.copy( name = newPrefix + a.name, - terms = a.terms map {term => term.copy(relName = newPrefix + term.relName)} + terms = a.terms map { + case term: Variable => term.copy(relName = newPrefix + term.relName) + case term: Constant => term + } ) } var i = 0 @@ -69,7 +81,7 @@ object DeepDiveLogDeltaDeriver{ } } } - ConjunctiveQuery(incCqHead, incCqBodies.toList) + ConjunctiveQuery(incCqHead, incCqBodies.toList, Nil) } // Incremental scheme declaration, @@ -101,7 +113,7 @@ object DeepDiveLogDeltaDeriver{ // if (!stmt.isQuery) { incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(incNewStmt.a.name, incNewStmt.a.terms), - List(List(Atom(stmt.a.name, stmt.a.terms)), List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms))))) + List(List(Atom(stmt.a.name, stmt.a.terms)), List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms))), Nil)) // } incrementalStatement.toList } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala index 8be539d0a..b34555547 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala @@ -20,7 +20,7 @@ object DeepDiveLogMergeDeriver{ ) ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, stmt.a.terms), - List(List(Atom(incNewStmt.a.name, incNewStmt.a.terms))))) + List(List(Atom(incNewStmt.a.name, incNewStmt.a.terms))), Nil)) } def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = { diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index 65ce612d7..a3111cee3 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -10,13 +10,21 @@ import scala.util.Try // *************************************** // * The union types for for the parser. 
* // *************************************** -case class Variable(varName : String, relName : String, index : Int ) +// case class Variable(varName : String, relName : String, index : Int ) // TODO make Atom a trait, and have multiple case classes, e.g., RelationAtom and CondExprAtom -case class Atom(name : String, terms : List[Variable]) +// ddlog column variable type: constant or variable +sealed trait ColumnVariable +case class Variable(varName : String, relName : String, index : Int ) extends ColumnVariable +case class Constant(value : String, relName: String, index: Int) extends ColumnVariable +case class Atom(name : String, terms : List[ColumnVariable]) case class Attribute(name : String, terms : List[Variable], types : List[String]) -case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]]) +case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[List[Condition]]) case class Column(name : String, t : String) +// condition +case class Condition(lhs: String, op: String, rhs: String, isRhsValue: Boolean) +case class BodyWithConditions(body: List[Atom], conditions: List[Condition]) + // variable type sealed trait VariableType { def cardinality: Long @@ -63,6 +71,10 @@ class DeepDiveLogParser extends JavaTokenParsers { s => StringEscapeUtils.unescapeJava( s.stripPrefix("\"").stripSuffix("\"")) } + def stringLiteralAsSqlString = stringLiteral ^^ { s => + s"""'${s.stripPrefix("\"").stripSuffix("\"")}'""" + } + def constant = stringLiteralAsSqlString | wholeNumber // C/Java/Scala-style as well as shell script-style comments are supported // by treating them as whiteSpace @@ -102,31 +114,52 @@ class DeepDiveLogParser extends JavaTokenParsers { } } + def variable = variableName ^^ { Variable(_, "", 0) } + def columnConstant = constant ^^ { Constant(_, "", 0) } + def column = columnConstant | variable // TODO support aggregate function syntax somehow - def cqHead = relationName ~ "(" ~ repsep(variableName, ",") ~ ")" ^^ { + def cqHead = relationName ~ "(" ~ repsep(column, ",") ~ ")" ^^ { case (r ~ "(" ~ variableUses ~ ")") => Atom(r, variableUses.zipWithIndex map { - case(name,i) => Variable(name, r, i) + case(Variable(name,_,_),i) => Variable(name, r, i) + case(Constant(name,_,_),i) => Constant(name, r, i) }) } // TODO add conditional expressions for where clause def cqConditionalExpr = failure("No conditional expression supported yet") def cqBodyAtom: Parser[Atom] = - ( relationName ~ "(" ~ repsep(variableName, ",") ~ ")" ^^ { + ( relationName ~ "(" ~ repsep(column, ",") ~ ")" ^^ { case (r ~ "(" ~ variableBindings ~ ")") => Atom(r, variableBindings.zipWithIndex map { - case(name,i) => Variable(name, r, i) + case(Variable(name,_,_),i) => Variable(name, r, i) + case(Constant(name,_,_),i) => Constant(name, r, i) }) } | cqConditionalExpr ) def cqBody: Parser[List[Atom]] = rep1sep(cqBodyAtom, ",") + + // conditions + def convertOperator = "||" | "::" + def filterOperator = "LIKE" | ">" | "<" | ">=" | "<=" | "!=" | "=" + def conditionWithConstant = variableName ~ filterOperator ~ constant ^^ { + case (lhs ~ op ~ rhs) => Condition(lhs, op, rhs, true) + } + def conditionWithVariable = variableName ~ filterOperator ~ variableName ^^ { + case (lhs ~ op ~ rhs) => Condition(lhs, op, rhs, false) + } + def condition = conditionWithVariable | conditionWithConstant + def cqCondition: Parser[List[Condition]] = repsep(condition, ",") + def cqBodyWithCondition = cqBody ~ opt(",") ~ cqCondition ^^ { + case (b ~ o ~ c) => BodyWithConditions(b, c) + } + def conjunctiveQuery : 
Parser[ConjunctiveQuery] = - cqHead ~ ":-" ~ rep1sep(cqBody, ";") ^^ { + cqHead ~ ":-" ~ rep1sep(cqBodyWithCondition, ";") ^^ { case (headatom ~ ":-" ~ disjunctiveBodies) => - ConjunctiveQuery(headatom, disjunctiveBodies) + ConjunctiveQuery(headatom, disjunctiveBodies.map(_.body), disjunctiveBodies.map(_.conditions)) } def relationType: Parser[RelationType] = diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index 2d3c300eb..1e8643cd3 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -50,7 +50,15 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { def print(cq: ConjunctiveQuery): String = { val printAtom = {a:Atom => - val vars = a.terms map { _.varName } + val vars = a.terms map { + case x: Variable => x.varName + case x: Constant => { + if (x.value.startsWith("'")) + s""" "${x.value.stripPrefix("'").stripSuffix("'")}" """ + else + x.value + } + } s"${a.name}(${vars.mkString(", ")})" } val printListAtom = {a:List[Atom] => diff --git a/test/expected-output-test/conditions/compile.expected b/test/expected-output-test/conditions/compile.expected new file mode 100644 index 000000000..e69de29bb diff --git a/test/expected-output-test/conditions/input.ddl b/test/expected-output-test/conditions/input.ddl new file mode 100644 index 000000000..050cb0f7d --- /dev/null +++ b/test/expected-output-test/conditions/input.ddl @@ -0,0 +1,5 @@ +a(k int). +b(k int, p text, q text, r int). +c(s text, n int, t text). + +Q("test", 123, id) :- a(id), b(id, x,y,z), c("foo", 10, t), z>100. \ No newline at end of file From db4f5517e84fe8256f8f8a1201719c77645fc587 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Sat, 4 Jul 2015 21:34:37 -0700 Subject: [PATCH 141/347] expressions --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 109 ++++++++++++------ .../ddlog/DeepDiveLogDeltaDeriver.scala | 41 ++++--- .../ddlog/DeepDiveLogMergeDeriver.scala | 8 +- .../deepdive/ddlog/DeepDiveLogParser.scala | 59 +++++++--- .../ddlog/DeepDiveLogPrettyPrinter.scala | 23 ++-- .../expected-output-test/conditions/input.ddl | 2 +- 6 files changed, 167 insertions(+), 75 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 3ea56435c..0ac6d9122 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -194,26 +194,58 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // and stick it in a map. 
val qs = new QuerySchema(z) + // def resolveVarOrConst(variable: ColumnVariable, bodyIndex: Int) = { + // variable match { + // case Variable(varName,relName,index) => { + // val canonical_body_index = qs.getBodyIndex(varName) + // if (canonical_body_index != bodyIndex) { + // val real_attr_name1 = resolveName( Variable(varName, relName, index) ) + // val real_attr_name2 = resolveName( qs.getVar(varName)) + // Some(s"R${ bodyIndex }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") + // } else { None } + // } + // case Constant(v,relName,i) => { + // val attr = schema(relName, i) + // Some(s"R${bodyIndex}.${attr} = ${v}") + // } + // } + // } + + def resolveVarOrConst(variable: ColumnVariable, bodyIndex: Int) : String = { + variable match { + case Variable(varName,relName,index) => resolveColumn(varName, qs, z, OriginalOnly).get + case Constant(v,relName,i) => "v" + } + } + var whereClause = z.bodies(0).zipWithIndex flatMap { - case (Atom(relName, terms),body_index) => { - terms flatMap { - case Variable(varName, relName, index) => - val canonical_body_index = qs.getBodyIndex(varName) - - if (canonical_body_index != body_index) { - val real_attr_name1 = resolveName( Variable(varName, relName, index) ) - val real_attr_name2 = resolveName( qs.getVar(varName)) - Some(s"R${ body_index }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") - } else { None } - // a constant in body means a equality condition - case Constant(value, r, i) => { - val attr = schema(relName, i) - Some(s"R${body_index}.${attr} = ${value}") + case (Atom(relName, terms),bodyIndex) => { + terms flatMap { case Expression(vars, ops, relName, index) => + // simple variable name or constant + if (ops isEmpty) { + vars(0) match { + case Variable(varName,relName,index) => { + val canonical_body_index = qs.getBodyIndex(varName) + if (canonical_body_index != bodyIndex) { + val real_attr_name1 = resolveName( Variable(varName, relName, index) ) + val real_attr_name2 = resolveName( qs.getVar(varName)) + Some(s"R${ bodyIndex }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") + } else { None } + } + case Constant(v,relName,i) => { + val attr = schema(relName, i) + Some(s"R${bodyIndex}.${attr} = ${v}") + } + } + } else { // expression + val resolvedVars = vars map (resolveVarOrConst(_, bodyIndex)) + val expr = resolvedVars(0) + " " + ((ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString + val attr = schema(relName, index) + Some(s"${expr} = R${bodyIndex}.${attr}") } } } } - println(whereClause) // resolve conditions val conditions = z.conditions(0) flatMap { case Condition(lhs, op, rhs, isRhsValue) => @@ -224,7 +256,6 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } Some(s"${resolvedLhs.get} ${op} ${resolvedRhs.get}") } - println(conditions) whereClause = whereClause ++ conditions @@ -307,11 +338,15 @@ class QuerySchema(q : ConjunctiveQuery) { def generateCanonicalVar() = { q.bodies(0).zipWithIndex.foreach { case (Atom(relName,terms),index) => { - terms.foreach { - case Variable(v, r, i) => - if( ! (query_schema contains v) ) - query_schema += { v -> (index, Variable(v,r,i) ) } - case _ => + terms.foreach { case Expression(c, op, r, i) => + if (op.isEmpty) { + c(0) match { + case Variable(v,r,i) => + if (! 
(query_schema contains v) ) + query_schema += { v -> (index, Variable(v,r,i) ) } + case _ => + } + } } } } @@ -379,12 +414,26 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody), stmt.q.conditions) // Generate the body of the query. val qs = new QuerySchema( tmpCq ) + + // map head terms to sql + def mapHeadTerms(terms: List[Expression], alias: AliasStyle = OriginalOnly) = { + terms map { case Expression(v, ops, _, _) => + val resolvedVars = v map (resolveVarOrConst(_)) + val expr = resolvedVars(0) + ((ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString + expr + } + } + + def resolveVarOrConst(v: ColumnVariable, alias: AliasStyle = OriginalOnly) = { + v match { + case Variable(v,r,i) => ss.resolveColumn(v, qs, tmpCq, alias).get + case Constant(v,r,i) => v + } + } + if (stmt.supervision != null) { if (stmt.q.bodies.length > 1) ss.error(s"Scoping rule does not allow disjunction.\n") - val headTerms = tmpCq.head.terms map { - case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" - case Constant(v,r,i) => Some(v) - } + val headTerms = mapHeadTerms(tmpCq.head.terms) val index = qs.getBodyIndex(stmt.supervision) val name = ss.resolveName(qs.getVar(stmt.supervision)) val labelCol = s"R${index}.${name}" @@ -393,10 +442,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { ${ ss.generateSQLBody(tmpCq) } """ } else if ((ss.schemaDeclarationGroupByHead contains stmt.q.head.name) && (ss.schemaDeclarationGroupByHead(stmt.q.head.name)(0).isQuery) && (stmt.q.head.name startsWith "dd_new_")) { - val headTerms = tmpCq.head.terms map { - case Variable(v,r,i) => s"R${i}.${ss.resolveName(qs.getVar(v)) }" - case Constant(v,r,i) => Some(v) - } + val headTerms = mapHeadTerms(tmpCq.head.terms) val headTermsStr = ( headTerms :+ "id" ).mkString(", ") inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, label ${ ss.generateSQLBody(tmpCq) } @@ -412,10 +458,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { case true => OriginalOnly case false => OriginalAndAlias } - val variableCols = tmpCq.head.terms flatMap { - case(Variable(v,rr,i)) => ss.resolveColumn(v, qs, tmpCq, resolveColumnFlag) - case Constant(v,rr,i) => Some(v) - } + val variableCols = mapHeadTerms(tmpCq.head.terms, resolveColumnFlag) val selectStr = variableCols.mkString(", ") diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index 874582076..25dfc318b 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -19,22 +19,25 @@ object DeepDiveLogDeltaDeriver{ } def transform(cq: ConjunctiveQuery, isInference: Boolean, mode: String): ConjunctiveQuery = { + // transform an expression + def transformExpression(expr: Expression, prefix: String) = { + val newVars = expr.variables map { + case term: Variable => term.copy(relName = prefix + term.relName) + case term: Constant => term + } + Expression(newVars, expr.ops, expr.relName, expr.index) + } + // New head val incCqHead = if (isInference) { cq.head.copy( name = newPrefix + cq.head.name, - terms = cq.head.terms map { - case term: Variable => term.copy(relName = newPrefix + term.relName) - case term: Constant => term - } + terms = cq.head.terms map (transformExpression(_, newPrefix)) ) } else { cq.head.copy( name = deltaPrefix + cq.head.name, - terms = cq.head.terms map { - case term: Variable => term.copy(relName 
= deltaPrefix + term.relName) - case term: Constant => term - } + terms = cq.head.terms map (transformExpression(_, deltaPrefix)) ) } @@ -45,20 +48,14 @@ object DeepDiveLogDeltaDeriver{ val incDeltaBody = body map { a => a.copy( name = deltaPrefix + a.name, - terms = a.terms map { - case term: Variable => term.copy(relName = deltaPrefix + term.relName) - case term: Constant => term - } + terms = a.terms map (transformExpression(_, deltaPrefix)) ) } // New body val incNewBody = body map { a => a.copy( name = newPrefix + a.name, - terms = a.terms map { - case term: Variable => term.copy(relName = newPrefix + term.relName) - case term: Constant => term - } + terms = a.terms map (transformExpression(_, newPrefix)) ) } var i = 0 @@ -111,9 +108,17 @@ object DeepDiveLogDeltaDeriver{ ) incrementalStatement += incNewStmt + // from schema declaration to expressions + def variableToExpr(v: Variable) = Expression(List(v), List(), v.relName, v.index) + val originalExpr = stmt.a.terms map (variableToExpr(_)) + val incDeltaExpr = incDeltaStmt.a.terms map (variableToExpr(_)) + val incNewExpr = incNewStmt.a.terms map (variableToExpr(_)) + // if (!stmt.isQuery) { - incrementalStatement += ExtractionRule(ConjunctiveQuery(Atom(incNewStmt.a.name, incNewStmt.a.terms), - List(List(Atom(stmt.a.name, stmt.a.terms)), List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms))), Nil)) + incrementalStatement += ExtractionRule(ConjunctiveQuery( + Atom(incNewStmt.a.name, incNewExpr), + List(List(Atom(stmt.a.name, originalExpr)), List(Atom(incDeltaStmt.a.name, incDeltaExpr))), + List())) // } incrementalStatement.toList } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala index b34555547..8b581d42f 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala @@ -19,8 +19,12 @@ object DeepDiveLogMergeDeriver{ ) ) - ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, stmt.a.terms), - List(List(Atom(incNewStmt.a.name, incNewStmt.a.terms))), Nil)) + def variableToExpr(v: Variable) = Expression(List(v), List(), v.relName, v.index) + val originalExpr = stmt.a.terms map (variableToExpr(_)) + val incNewExpr = incNewStmt.a.terms map (variableToExpr(_)) + + ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, originalExpr), + List(List(Atom(incNewStmt.a.name, incNewExpr))), Nil)) } def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = { diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index a3111cee3..eaab4a531 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -16,7 +16,15 @@ import scala.util.Try sealed trait ColumnVariable case class Variable(varName : String, relName : String, index : Int ) extends ColumnVariable case class Constant(value : String, relName: String, index: Int) extends ColumnVariable -case class Atom(name : String, terms : List[ColumnVariable]) +case class Expression(variables: List[ColumnVariable], ops: List[String], relName: String, index: Int) { + def print(resolve: ColumnVariable => String) = { + val resolvedVars = variables map (resolve(_)) + resolvedVars(0) + ((ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString + } +} +case class Operator(operator: String, operand: ColumnVariable) + +case class Atom(name : String, terms : List[Expression]) 
case class Attribute(name : String, terms : List[Variable], types : List[String]) case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[List[Condition]]) case class Column(name : String, t : String) @@ -54,7 +62,7 @@ case class RowWiseLineHandler(format: String, command: String) extends FunctionI // Statements that will be parsed and compiled trait Statement -case class SchemaDeclaration( a : Attribute , isQuery : Boolean, variableType : Option[VariableType] = None) extends Statement // atom and whether this is a query relation. +case class SchemaDeclaration( a : Attribute , isQuery : Boolean, variableType : Option[VariableType]) extends Statement // atom and whether this is a query relation. case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration], mode: String = null) extends Statement case class ExtractionRule(q : ConjunctiveQuery, supervision: String = null) extends Statement // Extraction rule case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule @@ -74,7 +82,7 @@ class DeepDiveLogParser extends JavaTokenParsers { def stringLiteralAsSqlString = stringLiteral ^^ { s => s"""'${s.stripPrefix("\"").stripSuffix("\"")}'""" } - def constant = stringLiteralAsSqlString | wholeNumber + def constant = stringLiteralAsSqlString | wholeNumber | "TRUE" | "FALSE" | "TEXT" | "INT" | "BOOLEAN" // C/Java/Scala-style as well as shell script-style comments are supported // by treating them as whiteSpace @@ -102,8 +110,8 @@ class DeepDiveLogParser extends JavaTokenParsers { def dataType = CategoricalParser | BooleanParser def schemaDeclaration: Parser[SchemaDeclaration] = - relationName ~ opt("?") ~ "(" ~ rep1sep(columnDeclaration, ",") ~ ")" ~ opt(dataType) ^^ { - case (r ~ isQuery ~ "(" ~ attrs ~ ")" ~ vType) => { + relationName ~ opt("?") ~ opt("!") ~ "(" ~ rep1sep(columnDeclaration, ",") ~ ")" ~ opt(dataType) ^^ { + case (r ~ isQuery ~ isDistinct ~ "(" ~ attrs ~ ")" ~ vType) => { val vars = attrs.zipWithIndex map { case(x, i) => Variable(x.name, r, i) } var types = attrs map { case(x) => x.t } val variableType = vType match { @@ -114,35 +122,58 @@ class DeepDiveLogParser extends JavaTokenParsers { } } + def operator = "||" | "+" | "-" | "*" | "/" + def castOp = "::" + def variable = variableName ^^ { Variable(_, "", 0) } def columnConstant = constant ^^ { Constant(_, "", 0) } - def column = columnConstant | variable + def variableOrConstant = columnConstant | variable + def operateOn = operator ~ variableOrConstant ^^ { case (v ~ o) => Operator(v,o) } + def typecast = castOp ~ columnConstant ^^ { case (v ~ o) => Operator(v,o) } + def operatorAndOperand = operateOn | typecast + def expression = variableOrConstant ~ rep(operatorAndOperand) ^^ { + case (v ~ opList) => { + val variables = List(v) ++ (opList map (_.operand)) + val ops = opList map (_.operator) + Expression(variables, ops, "", 0) + } + } // TODO support aggregate function syntax somehow - def cqHead = relationName ~ "(" ~ repsep(column, ",") ~ ")" ^^ { + def cqHead = relationName ~ "(" ~ repsep(expression, ",") ~ ")" ^^ { case (r ~ "(" ~ variableUses ~ ")") => Atom(r, variableUses.zipWithIndex map { - case(Variable(name,_,_),i) => Variable(name, r, i) - case(Constant(name,_,_),i) => Constant(name, r, i) + case (Expression(v,op,_,_),i) => { + val vars = v map { + case Variable(x,_,_) => Variable(x,r,i) + case Constant(x,_,_) => Constant(x,r,i) + } + 
Expression(vars, op, r, i) + } }) } // TODO add conditional expressions for where clause def cqConditionalExpr = failure("No conditional expression supported yet") def cqBodyAtom: Parser[Atom] = - ( relationName ~ "(" ~ repsep(column, ",") ~ ")" ^^ { + ( relationName ~ "(" ~ repsep(expression, ",") ~ ")" ^^ { case (r ~ "(" ~ variableBindings ~ ")") => Atom(r, variableBindings.zipWithIndex map { - case(Variable(name,_,_),i) => Variable(name, r, i) - case(Constant(name,_,_),i) => Constant(name, r, i) - }) + case (Expression(v,op,_,_),i) => { + val vars = v map { + case Variable(x,_,_) => Variable(x,r,i) + case Constant(x,_,_) => Constant(x,r,i) + } + // println(Expression(vars, op, r, i)) + Expression(vars, op, r, i) + } + }) } | cqConditionalExpr ) def cqBody: Parser[List[Atom]] = rep1sep(cqBodyAtom, ",") // conditions - def convertOperator = "||" | "::" def filterOperator = "LIKE" | ">" | "<" | ">=" | "<=" | "!=" | "=" def conditionWithConstant = variableName ~ filterOperator ~ constant ^^ { case (lhs ~ op ~ rhs) => Condition(lhs, op, rhs, true) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index 1e8643cd3..e680951ba 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -48,22 +48,31 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { |""".stripMargin } + def printVarOrConst(x: ColumnVariable) = { + x match { + case x: Variable => x.varName + case x: Constant => { + if (x.value.startsWith("'")) + s""" "${x.value.stripPrefix("'").stripSuffix("'")}" """ + else + x.value + } + } + } + def print(cq: ConjunctiveQuery): String = { val printAtom = {a:Atom => val vars = a.terms map { - case x: Variable => x.varName - case x: Constant => { - if (x.value.startsWith("'")) - s""" "${x.value.stripPrefix("'").stripSuffix("'")}" """ - else - x.value - } + case e => e.print(printVarOrConst) } s"${a.name}(${vars.mkString(", ")})" } val printListAtom = {a:List[Atom] => s"${(a map printAtom).mkString(",\n ")}" } + val printCondition = {a: List[Condition] => + (a map { case Condition(lhs, op, rhs, _) => s"${lhs} ${op} ${rhs}" }).mkString(",") + } s"""${printAtom(cq.head)} :- | ${(cq.bodies map printListAtom).mkString(";\n ")}""".stripMargin } diff --git a/test/expected-output-test/conditions/input.ddl b/test/expected-output-test/conditions/input.ddl index 050cb0f7d..6c14b88d1 100644 --- a/test/expected-output-test/conditions/input.ddl +++ b/test/expected-output-test/conditions/input.ddl @@ -2,4 +2,4 @@ a(k int). b(k int, p text, q text, r int). c(s text, n int, t text). -Q("test", 123, id) :- a(id), b(id, x,y,z), c("foo", 10, t), z>100. \ No newline at end of file +Q("test" :: TEXT, 123, id) :- a(id), b(id, x,y,z), c(x || y,10,"foo"), z>100. 
\ No newline at end of file From 4acacde0230546bd28e3babdf1a82b15482185ec Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Sat, 4 Jul 2015 22:59:12 -0700 Subject: [PATCH 142/347] add support for compound conditions --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 54 ++++++-------- .../deepdive/ddlog/DeepDiveLogParser.scala | 24 +++---- .../ddlog/DeepDiveLogPrettyPrinter.scala | 6 +- .../conditions/compile.expected | 0 .../expressions/compile.expected | 71 +++++++++++++++++++ .../{conditions => expressions}/input.ddl | 2 +- test/test.sh | 3 +- 7 files changed, 109 insertions(+), 51 deletions(-) delete mode 100644 test/expected-output-test/conditions/compile.expected create mode 100644 test/expected-output-test/expressions/compile.expected rename test/expected-output-test/{conditions => expressions}/input.ddl (81%) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 0ac6d9122..eb725b9b3 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -194,30 +194,20 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // and stick it in a map. val qs = new QuerySchema(z) - // def resolveVarOrConst(variable: ColumnVariable, bodyIndex: Int) = { - // variable match { - // case Variable(varName,relName,index) => { - // val canonical_body_index = qs.getBodyIndex(varName) - // if (canonical_body_index != bodyIndex) { - // val real_attr_name1 = resolveName( Variable(varName, relName, index) ) - // val real_attr_name2 = resolveName( qs.getVar(varName)) - // Some(s"R${ bodyIndex }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") - // } else { None } - // } - // case Constant(v,relName,i) => { - // val attr = schema(relName, i) - // Some(s"R${bodyIndex}.${attr} = ${v}") - // } - // } - // } - - def resolveVarOrConst(variable: ColumnVariable, bodyIndex: Int) : String = { + def resolveVarOrConst(variable: ColumnVariable) : String = { variable match { case Variable(varName,relName,index) => resolveColumn(varName, qs, z, OriginalOnly).get - case Constant(v,relName,i) => "v" + case Constant(v,relName,i) => v } } + // // resolve an expression + // def resolveExpression(e: Expression) = { + // val resolvedVars = e.variables map (resolveVarOrConst(_)) + // resolvedVars(0) + " " + ((e.ops zip resolvedVars.drop(1)).map { + // case (a,b) => s"${a} ${b}" }).mkString + // } + var whereClause = z.bodies(0).zipWithIndex flatMap { case (Atom(relName, terms),bodyIndex) => { terms flatMap { case Expression(vars, ops, relName, index) => @@ -238,8 +228,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } } } else { // expression - val resolvedVars = vars map (resolveVarOrConst(_, bodyIndex)) - val expr = resolvedVars(0) + " " + ((ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString + val expr = Expression(vars, ops, relName, index).print(resolveVarOrConst) val attr = schema(relName, index) Some(s"${expr} = R${bodyIndex}.${attr}") } @@ -248,20 +237,17 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } // resolve conditions - val conditions = z.conditions(0) flatMap { case Condition(lhs, op, rhs, isRhsValue) => - val resolvedLhs = resolveColumn(lhs, qs, z, OriginalOnly) - val resolvedRhs = isRhsValue match { - case true => Some(rhs) - case false => resolveColumn(rhs, qs, z, OriginalOnly) - } - 
Some(s"${resolvedLhs.get} ${op} ${resolvedRhs.get}") - } - - whereClause = whereClause ++ conditions - + val conditions = ((z.conditions(0).conditions map { case x: List[Condition] => + (x map { case Condition(lhs, op, rhs) => + val lhsExpr = lhs.print(resolveVarOrConst) + val rhsExpr = rhs.print(resolveVarOrConst) + s"${lhsExpr} ${op} ${rhsExpr}" + }).mkString(" AND ") + }) map(v => s"(${v})")).mkString(" OR ") + var whereClauseStr = whereClause match { - case Nil => "" - case _ => s"""WHERE ${whereClause.mkString(" AND ")}""" + case Nil => if (conditions == "") "" else s"WHERE ${conditions}" + case _ => s"""WHERE ${whereClause.mkString(" AND ")} ${if (conditions == "") "" else s" AND (${conditions})"}""" } s"""FROM ${ bodyNames } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index eaab4a531..f87e22469 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -26,12 +26,14 @@ case class Operator(operator: String, operand: ColumnVariable) case class Atom(name : String, terms : List[Expression]) case class Attribute(name : String, terms : List[Variable], types : List[String]) -case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[List[Condition]]) +case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[CompoundCondition]) case class Column(name : String, t : String) // condition -case class Condition(lhs: String, op: String, rhs: String, isRhsValue: Boolean) -case class BodyWithConditions(body: List[Atom], conditions: List[Condition]) +case class BodyWithConditions(body: List[Atom], conditions: CompoundCondition) + +case class Condition(lhs: Expression, op: String, rhs: Expression) +case class CompoundCondition(conditions: List[List[Condition]]) // variable type sealed trait VariableType { @@ -175,16 +177,14 @@ class DeepDiveLogParser extends JavaTokenParsers { // conditions def filterOperator = "LIKE" | ">" | "<" | ">=" | "<=" | "!=" | "=" - def conditionWithConstant = variableName ~ filterOperator ~ constant ^^ { - case (lhs ~ op ~ rhs) => Condition(lhs, op, rhs, true) - } - def conditionWithVariable = variableName ~ filterOperator ~ variableName ^^ { - case (lhs ~ op ~ rhs) => Condition(lhs, op, rhs, false) + def condition = expression ~ filterOperator ~ expression ^^ { + case (lhs ~ op ~ rhs) => Condition(lhs, op, rhs) } - def condition = conditionWithVariable | conditionWithConstant - def cqCondition: Parser[List[Condition]] = repsep(condition, ",") - def cqBodyWithCondition = cqBody ~ opt(",") ~ cqCondition ^^ { - case (b ~ o ~ c) => BodyWithConditions(b, c) + def conjunctiveCondition = repsep(condition, ",") + def compoundCondition = repsep(conjunctiveCondition, ";") ^^ { CompoundCondition(_) } + // def cqCondition: Parser[List[Condition]] = repsep(condition, ",") + def cqBodyWithCondition = cqBody ~ opt(",") ~ opt("[") ~ compoundCondition ~ opt("]") ^^ { + case (b ~ _ ~ _ ~ c ~ _) => BodyWithConditions(b, c) } def conjunctiveQuery : Parser[ConjunctiveQuery] = diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index e680951ba..1cb3111d0 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -70,9 +70,9 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { val 
printListAtom = {a:List[Atom] => s"${(a map printAtom).mkString(",\n ")}" } - val printCondition = {a: List[Condition] => - (a map { case Condition(lhs, op, rhs, _) => s"${lhs} ${op} ${rhs}" }).mkString(",") - } + // val printCondition = {a: List[Condition] => + // (a map { case Condition(lhs, op, rhs, _) => s"${lhs} ${op} ${rhs}" }).mkString(",") + // } s"""${printAtom(cq.head)} :- | ${(cq.bodies map printListAtom).mkString(";\n ")}""".stripMargin } diff --git a/test/expected-output-test/conditions/compile.expected b/test/expected-output-test/conditions/compile.expected deleted file mode 100644 index e69de29bb..000000000 diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected new file mode 100644 index 000000000..2b90c3a5d --- /dev/null +++ b/test/expected-output-test/expressions/compile.expected @@ -0,0 +1,71 @@ + + deepdive.db.default { + driver: "org.postgresql.Driver" + url: "jdbc:postgresql://"${PGHOST}":"${PGPORT}"/"${DBNAME} + user: ${PGUSER} + password: ${PGPASSWORD} + dbname: ${DBNAME} + host: ${PGHOST} + port: ${PGPORT} + incremental_mode: ORIGINAL + } + + + + deepdive.schema.variables { + + } + + + deepdive.extraction.extractors.extraction_rule_1 { + sql: """ DROP TABLE IF EXISTS b CASCADE; + CREATE TABLE + b(k int, + p text, + q text, + r int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_0 { + sql: """ DROP TABLE IF EXISTS a CASCADE; + CREATE TABLE + a(k int) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_2 { + sql: """ DROP TABLE IF EXISTS c CASCADE; + CREATE TABLE + c(s text, + n int, + t text) + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.cleanup { + sql: """ + TRUNCATE b; + TRUNCATE a; + TRUNCATE c; + """ + style: "sql_extractor" + } + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP VIEW IF EXISTS Q; + CREATE VIEW Q AS + SELECT 'test':: TEXT, 123, R0.k + FROM a R0, b R1, c R2 + WHERE R1.k = R0.k AND R1.p|| R1.q = R2.s AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR (R1.r < 20 AND R1.r > 10)) + """ + style: "sql_extractor" + + } + +deepdive.pipeline.run: ${PIPELINE} +deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2] +deepdive.pipeline.pipelines.extraction: [extraction_rule_3] diff --git a/test/expected-output-test/conditions/input.ddl b/test/expected-output-test/expressions/input.ddl similarity index 81% rename from test/expected-output-test/conditions/input.ddl rename to test/expected-output-test/expressions/input.ddl index 6c14b88d1..df231a530 100644 --- a/test/expected-output-test/conditions/input.ddl +++ b/test/expected-output-test/expressions/input.ddl @@ -2,4 +2,4 @@ a(k int). b(k int, p text, q text, r int). c(s text, n int, t text). -Q("test" :: TEXT, 123, id) :- a(id), b(id, x,y,z), c(x || y,10,"foo"), z>100. \ No newline at end of file +Q("test" :: TEXT, 123, id) :- a(id), b(id, x,y,z), c(x || y,10,"foo"), [z>100; z < 20, z > 10]. 
\ No newline at end of file diff --git a/test/test.sh b/test/test.sh index cf48bf7dc..a466452b5 100755 --- a/test/test.sh +++ b/test/test.sh @@ -21,4 +21,5 @@ for t in *.bats.template; do done # run all .bats tests -bats "$@" *.bats */*.bats +# bats "$@" *.bats */*.bats +bats "$@" expected*/expressions.bats From 313bb8cd956cbdb4818d106470b6eef581bb8ea4 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Sun, 5 Jul 2015 18:32:36 -0700 Subject: [PATCH 143/347] fixes conflict in parsing compound conditions --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 38 ++++++++++--------- .../ddlog/DeepDiveLogDeltaDeriver.scala | 5 ++- .../ddlog/DeepDiveLogMergeDeriver.scala | 2 +- .../deepdive/ddlog/DeepDiveLogParser.scala | 16 ++++---- .../expressions/compile.expected | 2 +- 5 files changed, 34 insertions(+), 29 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index eb725b9b3..1d5286e88 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -201,13 +201,6 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } } - // // resolve an expression - // def resolveExpression(e: Expression) = { - // val resolvedVars = e.variables map (resolveVarOrConst(_)) - // resolvedVars(0) + " " + ((e.ops zip resolvedVars.drop(1)).map { - // case (a,b) => s"${a} ${b}" }).mkString - // } - var whereClause = z.bodies(0).zipWithIndex flatMap { case (Atom(relName, terms),bodyIndex) => { terms flatMap { case Expression(vars, ops, relName, index) => @@ -237,17 +230,24 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } // resolve conditions - val conditions = ((z.conditions(0).conditions map { case x: List[Condition] => - (x map { case Condition(lhs, op, rhs) => - val lhsExpr = lhs.print(resolveVarOrConst) - val rhsExpr = rhs.print(resolveVarOrConst) - s"${lhsExpr} ${op} ${rhsExpr}" - }).mkString(" AND ") - }) map(v => s"(${v})")).mkString(" OR ") + val conditionList = z.conditions(0) match { + case Some(c) => c.conditions map { case x: List[Condition] => + val inner = x map { case Condition(lhs, op, rhs) => + val lhsExpr = lhs.print(resolveVarOrConst) + val rhsExpr = rhs.print(resolveVarOrConst) + s"${lhsExpr} ${op} ${rhsExpr}" + } + inner.mkString(" AND ") + } + case None => List("") + } + + val conditions = conditionList flatMap ( v => if (v != "") Some(s"(${v})") else None) + val conditionStr = conditions.mkString(" OR ") var whereClauseStr = whereClause match { - case Nil => if (conditions == "") "" else s"WHERE ${conditions}" - case _ => s"""WHERE ${whereClause.mkString(" AND ")} ${if (conditions == "") "" else s" AND (${conditions})"}""" + case Nil => if (conditionStr == "") "" else s"WHERE ${conditionStr}" + case _ => s"""WHERE ${whereClause.mkString(" AND ")} ${if (conditionStr == "") "" else s" AND (${conditionStr})"}""" } s"""FROM ${ bodyNames } @@ -396,6 +396,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { def compileExtractionRules(stmts: List[ExtractionRule], ss: CompilationState): CompiledBlocks = { var inputQueries = new ListBuffer[String]() for (stmt <- stmts) { + // println(DeepDiveLogPrettyPrinter.print(stmt)) for (cqBody <- stmt.q.bodies) { val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody), stmt.q.conditions) // Generate the body of the query. 
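// Editorial sketch, not part of this patch: the hunk above compiles a compound
// condition -- a disjunction of conjunctions -- into SQL. A bracketed list such
// as [z > 100; z < 20, z > 10] becomes "(z > 100) OR (z < 20 AND z > 10)":
// commas AND the conditions inside a group, semicolons OR the groups together,
// and each group is parenthesized before joining. A self-contained illustration
// of that mapping (the object and method names here are illustrative only):
object CompoundConditionSketch {
  def toSql(disjuncts: List[List[String]]): String =
    (disjuncts map (_.mkString(" AND ")) map (c => s"(${c})")).mkString(" OR ")

  def main(args: Array[String]): Unit =
    // prints (z > 100) OR (z < 20 AND z > 10)
    println(toSql(List(List("z > 100"), List("z < 20", "z > 10"))))
}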
@@ -404,7 +405,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { // map head terms to sql def mapHeadTerms(terms: List[Expression], alias: AliasStyle = OriginalOnly) = { terms map { case Expression(v, ops, _, _) => - val resolvedVars = v map (resolveVarOrConst(_)) + val resolvedVars = v map (resolveVarOrConst(_, alias)) val expr = resolvedVars(0) + ((ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString expr } @@ -445,7 +446,6 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { case false => OriginalAndAlias } val variableCols = mapHeadTerms(tmpCq.head.terms, resolveColumnFlag) - val selectStr = variableCols.mkString(", ") inputQueries += s""" @@ -681,6 +681,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { // take an initial pass to analyze the parsed program val state = new CompilationState( programToCompile, config ) + // programToCompile foreach {stmt => println(DeepDiveLogPrettyPrinter.print(stmt))} + val body = new ListBuffer[String]() body ++= compileSchemaDeclarations((state.schemaDeclarationGroupByHead map (_._2)).flatten.toList, state) state.extractionRuleGroupByHead foreach {keyVal => body ++= compileExtractionRules(keyVal._2, state)} diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index 25dfc318b..735074f95 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -78,7 +78,8 @@ object DeepDiveLogDeltaDeriver{ } } } - ConjunctiveQuery(incCqHead, incCqBodies.toList, Nil) + // TODO fix conditions + ConjunctiveQuery(incCqHead, incCqBodies.toList, cq.conditions) } // Incremental scheme declaration, @@ -118,7 +119,7 @@ object DeepDiveLogDeltaDeriver{ incrementalStatement += ExtractionRule(ConjunctiveQuery( Atom(incNewStmt.a.name, incNewExpr), List(List(Atom(stmt.a.name, originalExpr)), List(Atom(incDeltaStmt.a.name, incDeltaExpr))), - List())) + List(None))) // } incrementalStatement.toList } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala index 8b581d42f..d79654968 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala @@ -24,7 +24,7 @@ object DeepDiveLogMergeDeriver{ val incNewExpr = incNewStmt.a.terms map (variableToExpr(_)) ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, originalExpr), - List(List(Atom(incNewStmt.a.name, incNewExpr))), Nil)) + List(List(Atom(incNewStmt.a.name, incNewExpr))), List(None))) } def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = { diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index f87e22469..6a8192e0d 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -26,11 +26,11 @@ case class Operator(operator: String, operand: ColumnVariable) case class Atom(name : String, terms : List[Expression]) case class Attribute(name : String, terms : List[Variable], types : List[String]) -case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[CompoundCondition]) +case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[Option[CompoundCondition]]) case class Column(name : String, t : String) // condition -case class 
BodyWithConditions(body: List[Atom], conditions: CompoundCondition) +case class BodyWithConditions(body: List[Atom], conditions: Option[CompoundCondition]) case class Condition(lhs: Expression, op: String, rhs: Expression) case class CompoundCondition(conditions: List[List[Condition]]) @@ -137,6 +137,7 @@ class DeepDiveLogParser extends JavaTokenParsers { case (v ~ opList) => { val variables = List(v) ++ (opList map (_.operand)) val ops = opList map (_.operator) + // println(Expression(variables, ops, "", 0)) Expression(variables, ops, "", 0) } } @@ -178,13 +179,14 @@ class DeepDiveLogParser extends JavaTokenParsers { // conditions def filterOperator = "LIKE" | ">" | "<" | ">=" | "<=" | "!=" | "=" def condition = expression ~ filterOperator ~ expression ^^ { - case (lhs ~ op ~ rhs) => Condition(lhs, op, rhs) + case (lhs ~ op ~ rhs) => { + Condition(lhs, op, rhs) + } } def conjunctiveCondition = repsep(condition, ",") - def compoundCondition = repsep(conjunctiveCondition, ";") ^^ { CompoundCondition(_) } - // def cqCondition: Parser[List[Condition]] = repsep(condition, ",") - def cqBodyWithCondition = cqBody ~ opt(",") ~ opt("[") ~ compoundCondition ~ opt("]") ^^ { - case (b ~ _ ~ _ ~ c ~ _) => BodyWithConditions(b, c) + def compoundCondition = "[" ~> repsep(conjunctiveCondition, ";") <~ "]" ^^ { CompoundCondition(_) } + def cqBodyWithCondition = cqBody ~ ("," ~> compoundCondition).? ^^ { + case (b ~ c) => BodyWithConditions(b, c) } def conjunctiveQuery : Parser[ConjunctiveQuery] = diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected index 2b90c3a5d..bb0710e7a 100644 --- a/test/expected-output-test/expressions/compile.expected +++ b/test/expected-output-test/expressions/compile.expected @@ -58,7 +58,7 @@ deepdive.extraction.extractors.extraction_rule_3 { sql: """ DROP VIEW IF EXISTS Q; CREATE VIEW Q AS - SELECT 'test':: TEXT, 123, R0.k + SELECT 'test':: TEXT, 123, R0.k AS "a.R0.k" FROM a R0, b R1, c R2 WHERE R1.k = R0.k AND R1.p|| R1.q = R2.s AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR (R1.r < 20 AND R1.r > 10)) """ From 3f32cff295972008e066d27c127e92f9c46ad3f9 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Mon, 6 Jul 2015 00:28:56 -0700 Subject: [PATCH 144/347] Fix delta deriver with conditions Match conditions with bodies when deriving delta rule --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 6 ++-- .../ddlog/DeepDiveLogDeltaDeriver.scala | 9 +++-- .../deepdive/ddlog/DeepDiveLogParser.scala | 7 +--- .../ddlog/DeepDiveLogPrettyPrinter.scala | 36 ++++++++++++++++--- 4 files changed, 41 insertions(+), 17 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 1d5286e88..88a97184f 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -221,7 +221,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } } } else { // expression - val expr = Expression(vars, ops, relName, index).print(resolveVarOrConst) + val expr = DeepDiveLogPrettyPrinter.printExpr(Expression(vars, ops, relName, index), resolveVarOrConst) val attr = schema(relName, index) Some(s"${expr} = R${bodyIndex}.${attr}") } @@ -233,8 +233,8 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C val conditionList = z.conditions(0) match { case Some(c) => c.conditions map { case x: List[Condition] => val inner = 
x map { case Condition(lhs, op, rhs) => - val lhsExpr = lhs.print(resolveVarOrConst) - val rhsExpr = rhs.print(resolveVarOrConst) + val lhsExpr = DeepDiveLogPrettyPrinter.printExpr(lhs, resolveVarOrConst) + val rhsExpr = DeepDiveLogPrettyPrinter.printExpr(rhs, resolveVarOrConst) s"${lhsExpr} ${op} ${rhsExpr}" } inner.mkString(" AND ") diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index 735074f95..3ad56dfb9 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -42,8 +42,9 @@ object DeepDiveLogDeltaDeriver{ } var incCqBodies = new ListBuffer[List[Atom]]() + var incCqConditions = new ListBuffer[Option[CompoundCondition]]() // New incremental bodies - for (body <- cq.bodies) { + cq.bodies zip cq.conditions foreach { case (body, cond) => // Delta body val incDeltaBody = body map { a => a.copy( @@ -63,6 +64,7 @@ object DeepDiveLogDeltaDeriver{ var index = if (incrementalFunctionInput contains incCqHead.name) -1 else 0 if (mode == "inc") { incCqBodies += incNewBody + incCqConditions += cond } else { for (i <- index to (body.length - 1)) { var newBody = new ListBuffer[Atom]() @@ -73,13 +75,14 @@ object DeepDiveLogDeltaDeriver{ newBody += incNewBody(j) else if (j == i) newBody += incDeltaBody(j) + incCqConditions += cond } incCqBodies += newBody.toList } } } // TODO fix conditions - ConjunctiveQuery(incCqHead, incCqBodies.toList, cq.conditions) + ConjunctiveQuery(incCqHead, incCqBodies.toList, incCqConditions.toList) } // Incremental scheme declaration, @@ -119,7 +122,7 @@ object DeepDiveLogDeltaDeriver{ incrementalStatement += ExtractionRule(ConjunctiveQuery( Atom(incNewStmt.a.name, incNewExpr), List(List(Atom(stmt.a.name, originalExpr)), List(Atom(incDeltaStmt.a.name, incDeltaExpr))), - List(None))) + List(None, None))) // } incrementalStatement.toList } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index 6a8192e0d..91ba31410 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -16,12 +16,7 @@ import scala.util.Try sealed trait ColumnVariable case class Variable(varName : String, relName : String, index : Int ) extends ColumnVariable case class Constant(value : String, relName: String, index: Int) extends ColumnVariable -case class Expression(variables: List[ColumnVariable], ops: List[String], relName: String, index: Int) { - def print(resolve: ColumnVariable => String) = { - val resolvedVars = variables map (resolve(_)) - resolvedVars(0) + ((ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString - } -} +case class Expression(variables: List[ColumnVariable], ops: List[String], relName: String, index: Int) case class Operator(operator: String, operand: ColumnVariable) case class Atom(name : String, terms : List[Expression]) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index 1cb3111d0..e66a8cc14 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -60,21 +60,47 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { } } + def printExpr(e: Expression, resolve: ColumnVariable => String) = { + val resolvedVars = 
e.variables map (resolve(_)) + resolvedVars(0) + ((e.ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString + } + def print(cq: ConjunctiveQuery): String = { val printAtom = {a:Atom => val vars = a.terms map { - case e => e.print(printVarOrConst) + case e => printExpr(e, printVarOrConst) } s"${a.name}(${vars.mkString(", ")})" } val printListAtom = {a:List[Atom] => s"${(a map printAtom).mkString(",\n ")}" } - // val printCondition = {a: List[Condition] => - // (a map { case Condition(lhs, op, rhs, _) => s"${lhs} ${op} ${rhs}" }).mkString(",") - // } + + def printVar(x: ColumnVariable) = { x match { + case Variable(v,_,_) => v + case Constant(v,_,_) => v + } + } + val printConjunctiveCondition = {a: List[Condition] => + (a map { case Condition(lhs, op, rhs) => + val lhsExpr = printExpr(lhs, printVar) + val rhsExpr = printExpr(rhs, printVar) + s"${lhsExpr} ${op} ${rhsExpr}" }).mkString(", ") + } + val conditionList = cq.conditions map { + case Some(x) => Some(x.conditions map printConjunctiveCondition mkString("; ")) + case None => None + } + val bodyList = cq.bodies map printListAtom + val bodyWithCondition = (bodyList zip conditionList map { case(a,b) => + b match { + case Some(c) => s"${a}, [ ${c} ]" + case None => a + } + }).mkString(";\n ") + s"""${printAtom(cq.head)} :- - | ${(cq.bodies map printListAtom).mkString(";\n ")}""".stripMargin + | ${bodyWithCondition}""".stripMargin } def print(stmt: ExtractionRule): String = { From d15a3b5fa37efb9c39060d1284170035d70c4c58 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Mon, 6 Jul 2015 02:16:44 -0700 Subject: [PATCH 145/347] functions --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 75 ++++++++++--------- .../deepdive/ddlog/DeepDiveLogParser.scala | 17 ++++- .../ddlog/DeepDiveLogPrettyPrinter.scala | 24 +++--- .../expressions/compile.expected | 4 +- .../expressions/input.ddl | 2 +- .../expressions/print.expected | 16 ++++ test/test.sh | 3 +- 7 files changed, 87 insertions(+), 54 deletions(-) create mode 100644 test/expected-output-test/expressions/print.expected diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 88a97184f..00e72fea2 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -186,6 +186,24 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } } + def resolveColumnVar(v: ColumnVariable, cq: ConjunctiveQuery, alias: AliasStyle = OriginalOnly) = { + val qs = new QuerySchema(cq) + def resolveVarOrConst(x: ColumnVariable, alias: AliasStyle) = { + x match { + case Variable(v,r,i) => resolveColumn(v, qs, cq, alias).get + case Constant(v,r,i) => v + case _ => "" + } + } + v match { + case InlineFunction(name, args) => { + val resolvedArgs = args map (resolveVarOrConst(_, OriginalOnly)) + s"${name}(${resolvedArgs.mkString(", ")})" + } + case _ => resolveVarOrConst(v, alias) + } + } + // This is generic code that generates the FROM with positional aliasing R0, R1, etc. // and the corresponding WHERE clause (equating all variables) def generateSQLBody(z : ConjunctiveQuery) : String = { @@ -194,18 +212,15 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // and stick it in a map. 
val qs = new QuerySchema(z) - def resolveVarOrConst(variable: ColumnVariable) : String = { - variable match { - case Variable(varName,relName,index) => resolveColumn(varName, qs, z, OriginalOnly).get - case Constant(v,relName,i) => v - } - } - var whereClause = z.bodies(0).zipWithIndex flatMap { case (Atom(relName, terms),bodyIndex) => { terms flatMap { case Expression(vars, ops, relName, index) => - // simple variable name or constant - if (ops isEmpty) { + // simple variable + val simpleVar = (ops isEmpty) && (vars(0) match { + case x: Variable => true + case _ => false + }) + if (simpleVar) { vars(0) match { case Variable(varName,relName,index) => { val canonical_body_index = qs.getBodyIndex(varName) @@ -215,15 +230,12 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C Some(s"R${ bodyIndex }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") } else { None } } - case Constant(v,relName,i) => { - val attr = schema(relName, i) - Some(s"R${bodyIndex}.${attr} = ${v}") - } + case _ => None } } else { // expression - val expr = DeepDiveLogPrettyPrinter.printExpr(Expression(vars, ops, relName, index), resolveVarOrConst) + val expr = DeepDiveLogPrettyPrinter.printExpr(Expression(vars, ops, relName, index), resolveColumnVar(_,z)) val attr = schema(relName, index) - Some(s"${expr} = R${bodyIndex}.${attr}") + Some(s"R${bodyIndex}.${attr} = ${expr}") } } } @@ -233,8 +245,8 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C val conditionList = z.conditions(0) match { case Some(c) => c.conditions map { case x: List[Condition] => val inner = x map { case Condition(lhs, op, rhs) => - val lhsExpr = DeepDiveLogPrettyPrinter.printExpr(lhs, resolveVarOrConst) - val rhsExpr = DeepDiveLogPrettyPrinter.printExpr(rhs, resolveVarOrConst) + val lhsExpr = DeepDiveLogPrettyPrinter.printExpr(lhs, resolveColumnVar(_,z)) + val rhsExpr = DeepDiveLogPrettyPrinter.printExpr(rhs, resolveColumnVar(_,z)) s"${lhsExpr} ${op} ${rhsExpr}" } inner.mkString(" AND ") @@ -392,6 +404,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { schemas.toList } + // Generate extraction rule part for deepdive def compileExtractionRules(stmts: List[ExtractionRule], ss: CompilationState): CompiledBlocks = { var inputQueries = new ListBuffer[String]() @@ -402,25 +415,11 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { // Generate the body of the query. 
val qs = new QuerySchema( tmpCq ) - // map head terms to sql - def mapHeadTerms(terms: List[Expression], alias: AliasStyle = OriginalOnly) = { - terms map { case Expression(v, ops, _, _) => - val resolvedVars = v map (resolveVarOrConst(_, alias)) - val expr = resolvedVars(0) + ((ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString - expr - } - } - - def resolveVarOrConst(v: ColumnVariable, alias: AliasStyle = OriginalOnly) = { - v match { - case Variable(v,r,i) => ss.resolveColumn(v, qs, tmpCq, alias).get - case Constant(v,r,i) => v - } - } - if (stmt.supervision != null) { if (stmt.q.bodies.length > 1) ss.error(s"Scoping rule does not allow disjunction.\n") - val headTerms = mapHeadTerms(tmpCq.head.terms) + val headTerms = tmpCq.head.terms map { x => + DeepDiveLogPrettyPrinter.printExpr(x, ss.resolveColumnVar(_, tmpCq, OriginalOnly)) + } val index = qs.getBodyIndex(stmt.supervision) val name = ss.resolveName(qs.getVar(stmt.supervision)) val labelCol = s"R${index}.${name}" @@ -429,7 +428,9 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { ${ ss.generateSQLBody(tmpCq) } """ } else if ((ss.schemaDeclarationGroupByHead contains stmt.q.head.name) && (ss.schemaDeclarationGroupByHead(stmt.q.head.name)(0).isQuery) && (stmt.q.head.name startsWith "dd_new_")) { - val headTerms = mapHeadTerms(tmpCq.head.terms) + val headTerms = tmpCq.head.terms map { x => + DeepDiveLogPrettyPrinter.printExpr(x, ss.resolveColumnVar(_, tmpCq, OriginalOnly)) + } val headTermsStr = ( headTerms :+ "id" ).mkString(", ") inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, label ${ ss.generateSQLBody(tmpCq) } @@ -445,7 +446,9 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { case true => OriginalOnly case false => OriginalAndAlias } - val variableCols = mapHeadTerms(tmpCq.head.terms, resolveColumnFlag) + val variableCols = tmpCq.head.terms map { x => + DeepDiveLogPrettyPrinter.printExpr(x, ss.resolveColumnVar(_, tmpCq, resolveColumnFlag)) + } val selectStr = variableCols.mkString(", ") inputQueries += s""" diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index 91ba31410..cc801cb51 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -16,6 +16,7 @@ import scala.util.Try sealed trait ColumnVariable case class Variable(varName : String, relName : String, index : Int ) extends ColumnVariable case class Constant(value : String, relName: String, index: Int) extends ColumnVariable +case class InlineFunction(functionName: String, args: List[ColumnVariable]) extends ColumnVariable case class Expression(variables: List[ColumnVariable], ops: List[String], relName: String, index: Int) case class Operator(operator: String, operand: ColumnVariable) @@ -125,14 +126,20 @@ class DeepDiveLogParser extends JavaTokenParsers { def variable = variableName ^^ { Variable(_, "", 0) } def columnConstant = constant ^^ { Constant(_, "", 0) } def variableOrConstant = columnConstant | variable - def operateOn = operator ~ variableOrConstant ^^ { case (v ~ o) => Operator(v,o) } + def inlineFunction = "!" 
~> functionName ~ "(" ~ rep1sep(variableOrConstant, ",") ~ ")" ^^ { + case (name ~ _ ~ args ~ _) => { + InlineFunction(name, args) + } + } + def columnVariable = columnConstant | variable | inlineFunction + def operateOn = operator ~ columnVariable ^^ { case (v ~ o) => Operator(v,o) } def typecast = castOp ~ columnConstant ^^ { case (v ~ o) => Operator(v,o) } def operatorAndOperand = operateOn | typecast - def expression = variableOrConstant ~ rep(operatorAndOperand) ^^ { + def expression = columnVariable ~ rep(operatorAndOperand) ^^ { case (v ~ opList) => { val variables = List(v) ++ (opList map (_.operand)) val ops = opList map (_.operator) - // println(Expression(variables, ops, "", 0)) + // println(variables.mkString) Expression(variables, ops, "", 0) } } @@ -145,6 +152,7 @@ class DeepDiveLogParser extends JavaTokenParsers { val vars = v map { case Variable(x,_,_) => Variable(x,r,i) case Constant(x,_,_) => Constant(x,r,i) + case InlineFunction(x, args) => InlineFunction(x, args) } Expression(vars, op, r, i) } @@ -161,6 +169,7 @@ class DeepDiveLogParser extends JavaTokenParsers { val vars = v map { case Variable(x,_,_) => Variable(x,r,i) case Constant(x,_,_) => Constant(x,r,i) + case InlineFunction(x, args) => InlineFunction(x, args) } // println(Expression(vars, op, r, i)) Expression(vars, op, r, i) @@ -179,7 +188,7 @@ class DeepDiveLogParser extends JavaTokenParsers { } } def conjunctiveCondition = repsep(condition, ",") - def compoundCondition = "[" ~> repsep(conjunctiveCondition, ";") <~ "]" ^^ { CompoundCondition(_) } + def compoundCondition = opt("[") ~> repsep(conjunctiveCondition, ";") <~ opt("]") ^^ { CompoundCondition(_) } def cqBodyWithCondition = cqBody ~ ("," ~> compoundCondition).? ^^ { case (b ~ c) => BodyWithConditions(b, c) } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index e66a8cc14..a56a67525 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -57,18 +57,29 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { else x.value } + case _ => "" + } + } + + def printColumnVar(x: ColumnVariable) = { + x match { + case InlineFunction(name, args) => { + val resolvedArgs = args map printVarOrConst + s"!${name}(${resolvedArgs.mkString(", ")})" + } + case _ => printVarOrConst(x) } } def printExpr(e: Expression, resolve: ColumnVariable => String) = { val resolvedVars = e.variables map (resolve(_)) - resolvedVars(0) + ((e.ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString + resolvedVars(0) + " " + ((e.ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString(" ") } def print(cq: ConjunctiveQuery): String = { val printAtom = {a:Atom => val vars = a.terms map { - case e => printExpr(e, printVarOrConst) + case e => printExpr(e, printColumnVar) } s"${a.name}(${vars.mkString(", ")})" } @@ -76,15 +87,10 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { s"${(a map printAtom).mkString(",\n ")}" } - def printVar(x: ColumnVariable) = { x match { - case Variable(v,_,_) => v - case Constant(v,_,_) => v - } - } val printConjunctiveCondition = {a: List[Condition] => (a map { case Condition(lhs, op, rhs) => - val lhsExpr = printExpr(lhs, printVar) - val rhsExpr = printExpr(rhs, printVar) + val lhsExpr = printExpr(lhs, printColumnVar) + val rhsExpr = printExpr(rhs, printColumnVar) s"${lhsExpr} ${op} ${rhsExpr}" }).mkString(", ") } val 
conditionList = cq.conditions map { diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected index bb0710e7a..20de75800 100644 --- a/test/expected-output-test/expressions/compile.expected +++ b/test/expected-output-test/expressions/compile.expected @@ -58,9 +58,9 @@ deepdive.extraction.extractors.extraction_rule_3 { sql: """ DROP VIEW IF EXISTS Q; CREATE VIEW Q AS - SELECT 'test':: TEXT, 123, R0.k AS "a.R0.k" + SELECT 'test':: TEXT, 123, R0.k AS "a.R0.k" , unnest(R1.q) FROM a R0, b R1, c R2 - WHERE R1.k = R0.k AND R1.p|| R1.q = R2.s AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR (R1.r < 20 AND R1.r > 10)) + WHERE R1.k = R0.k AND R2.s = R1.p|| R1.q AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR (R1.r < 20 AND R1.r > 10)) """ style: "sql_extractor" diff --git a/test/expected-output-test/expressions/input.ddl b/test/expected-output-test/expressions/input.ddl index df231a530..2580789a9 100644 --- a/test/expected-output-test/expressions/input.ddl +++ b/test/expected-output-test/expressions/input.ddl @@ -2,4 +2,4 @@ a(k int). b(k int, p text, q text, r int). c(s text, n int, t text). -Q("test" :: TEXT, 123, id) :- a(id), b(id, x,y,z), c(x || y,10,"foo"), [z>100; z < 20, z > 10]. \ No newline at end of file +Q("test" :: TEXT, 123, id, !unnest(y)) :- a(id), b(id, x,y,z), c(x || y,10,"foo"), [z>100; z < 20, z > 10]. \ No newline at end of file diff --git a/test/expected-output-test/expressions/print.expected b/test/expected-output-test/expressions/print.expected new file mode 100644 index 000000000..b68126f98 --- /dev/null +++ b/test/expected-output-test/expressions/print.expected @@ -0,0 +1,16 @@ +a(k int). + +b(k int, + p text, + q text, + r int). + +c(s text, + n int, + t text). + +Q( "test" :: TEXT, 123, id, !unnest(y)) :- + a(id), + b(id, x, y, z), + c(x || y, 10, "foo" ), [ z > 100; z < 20, z > 10 ]. 
+ diff --git a/test/test.sh b/test/test.sh index a466452b5..cf48bf7dc 100755 --- a/test/test.sh +++ b/test/test.sh @@ -21,5 +21,4 @@ for t in *.bats.template; do done # run all .bats tests -# bats "$@" *.bats */*.bats -bats "$@" expected*/expressions.bats +bats "$@" *.bats */*.bats From 1296274bc34321f4525e68749aae9c9e98bd3f3b Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Mon, 6 Jul 2015 02:34:02 -0700 Subject: [PATCH 146/347] Add distinct --- .../scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala | 8 ++++---- .../org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala | 5 +++-- .../org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala | 2 +- src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala | 8 ++++---- .../org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala | 2 +- test/expected-output-test/expressions/compile.expected | 2 +- test/expected-output-test/expressions/input.ddl | 2 +- test/expected-output-test/expressions/print.expected | 2 +- 8 files changed, 16 insertions(+), 15 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 00e72fea2..261f42b7b 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -411,7 +411,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { for (stmt <- stmts) { // println(DeepDiveLogPrettyPrinter.print(stmt)) for (cqBody <- stmt.q.bodies) { - val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody), stmt.q.conditions) + val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody), stmt.q.conditions, stmt.q.isDistinct) // Generate the body of the query. val qs = new QuerySchema( tmpCq ) @@ -450,9 +450,9 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { DeepDiveLogPrettyPrinter.printExpr(x, ss.resolveColumnVar(_, tmpCq, resolveColumnFlag)) } val selectStr = variableCols.mkString(", ") - + val distinctStr = if (tmpCq.isDistinct) " DISTINCT " else "" inputQueries += s""" - SELECT ${selectStr} + SELECT ${distinctStr} ${selectStr} ${ ss.generateSQLBody(tmpCq) }""" } } @@ -528,7 +528,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { for (cqBody <- stmt.q.bodies) { // edge query val fakeBody = stmt.q.head +: cqBody - val fakeCQ = ConjunctiveQuery(stmt.q.head, List(fakeBody), stmt.q.conditions) // we will just use the fakeBody below. + val fakeCQ = ConjunctiveQuery(stmt.q.head, List(fakeBody), stmt.q.conditions, stmt.q.isDistinct) // we will just use the fakeBody below. 
val index = cqBody.length + 1 val qs2 = new QuerySchema( fakeCQ ) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index 3ad56dfb9..1aa958c7e 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -24,6 +24,7 @@ object DeepDiveLogDeltaDeriver{ val newVars = expr.variables map { case term: Variable => term.copy(relName = prefix + term.relName) case term: Constant => term + case term: InlineFunction => term } Expression(newVars, expr.ops, expr.relName, expr.index) } @@ -82,7 +83,7 @@ object DeepDiveLogDeltaDeriver{ } } // TODO fix conditions - ConjunctiveQuery(incCqHead, incCqBodies.toList, incCqConditions.toList) + ConjunctiveQuery(incCqHead, incCqBodies.toList, incCqConditions.toList, cq.isDistinct) } // Incremental scheme declaration, @@ -122,7 +123,7 @@ object DeepDiveLogDeltaDeriver{ incrementalStatement += ExtractionRule(ConjunctiveQuery( Atom(incNewStmt.a.name, incNewExpr), List(List(Atom(stmt.a.name, originalExpr)), List(Atom(incDeltaStmt.a.name, incDeltaExpr))), - List(None, None))) + List(None, None), false)) // } incrementalStatement.toList } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala index d79654968..6ab4d6666 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala @@ -24,7 +24,7 @@ object DeepDiveLogMergeDeriver{ val incNewExpr = incNewStmt.a.terms map (variableToExpr(_)) ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, originalExpr), - List(List(Atom(incNewStmt.a.name, incNewExpr))), List(None))) + List(List(Atom(incNewStmt.a.name, incNewExpr))), List(None), false)) } def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = { diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index cc801cb51..194b56e86 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -22,7 +22,7 @@ case class Operator(operator: String, operand: ColumnVariable) case class Atom(name : String, terms : List[Expression]) case class Attribute(name : String, terms : List[Variable], types : List[String]) -case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[Option[CompoundCondition]]) +case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[Option[CompoundCondition]], isDistinct: Boolean) case class Column(name : String, t : String) // condition @@ -194,9 +194,9 @@ class DeepDiveLogParser extends JavaTokenParsers { } def conjunctiveQuery : Parser[ConjunctiveQuery] = - cqHead ~ ":-" ~ rep1sep(cqBodyWithCondition, ";") ^^ { - case (headatom ~ ":-" ~ disjunctiveBodies) => - ConjunctiveQuery(headatom, disjunctiveBodies.map(_.body), disjunctiveBodies.map(_.conditions)) + cqHead ~ opt("*") ~ ":-" ~ rep1sep(cqBodyWithCondition, ";") ^^ { + case (headatom ~ isDistinct ~ ":-" ~ disjunctiveBodies) => + ConjunctiveQuery(headatom, disjunctiveBodies.map(_.body), disjunctiveBodies.map(_.conditions), isDistinct != None) } def relationType: Parser[RelationType] = diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index a56a67525..67c51c557 
100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -105,7 +105,7 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { } }).mkString(";\n ") - s"""${printAtom(cq.head)} :- + s"""${printAtom(cq.head)} ${if (cq.isDistinct) "*" else ""} :- | ${bodyWithCondition}""".stripMargin } diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected index 20de75800..106dad5b5 100644 --- a/test/expected-output-test/expressions/compile.expected +++ b/test/expected-output-test/expressions/compile.expected @@ -58,7 +58,7 @@ deepdive.extraction.extractors.extraction_rule_3 { sql: """ DROP VIEW IF EXISTS Q; CREATE VIEW Q AS - SELECT 'test':: TEXT, 123, R0.k AS "a.R0.k" , unnest(R1.q) + SELECT DISTINCT 'test':: TEXT, 123, R0.k AS "a.R0.k" , unnest(R1.q) FROM a R0, b R1, c R2 WHERE R1.k = R0.k AND R2.s = R1.p|| R1.q AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR (R1.r < 20 AND R1.r > 10)) """ diff --git a/test/expected-output-test/expressions/input.ddl b/test/expected-output-test/expressions/input.ddl index 2580789a9..cf6827dc5 100644 --- a/test/expected-output-test/expressions/input.ddl +++ b/test/expected-output-test/expressions/input.ddl @@ -2,4 +2,4 @@ a(k int). b(k int, p text, q text, r int). c(s text, n int, t text). -Q("test" :: TEXT, 123, id, !unnest(y)) :- a(id), b(id, x,y,z), c(x || y,10,"foo"), [z>100; z < 20, z > 10]. \ No newline at end of file +Q("test" :: TEXT, 123, id, !unnest(y)) * :- a(id), b(id, x,y,z), c(x || y,10,"foo"), [z>100; z < 20, z > 10]. \ No newline at end of file diff --git a/test/expected-output-test/expressions/print.expected b/test/expected-output-test/expressions/print.expected index b68126f98..b37bf8a4b 100644 --- a/test/expected-output-test/expressions/print.expected +++ b/test/expected-output-test/expressions/print.expected @@ -9,7 +9,7 @@ c(s text, n int, t text). -Q( "test" :: TEXT, 123, id, !unnest(y)) :- +Q( "test" :: TEXT, 123, id, !unnest(y)) * :- a(id), b(id, x, y, z), c(x || y, 10, "foo" ), [ z > 100; z < 20, z > 10 ]. 
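A note on PATCH 146 above: the trailing "*" after a rule head is what sets
isDistinct, and the compiler then emits SELECT DISTINCT for such rules. The
toy grammar below only mimics the opt("*") combinator from that patch; it is
a minimal sketch, not the actual DDlog grammar:

import scala.util.parsing.combinator.JavaTokenParsers

object DistinctMarkerSketch extends JavaTokenParsers {
  // stand-in for cqHead: RelationName(arg, ...)
  def head: Parser[String] =
    ident ~ "(" ~ repsep(ident, ",") ~ ")" ^^ { case (n ~ _ ~ _ ~ _) => n }
  // mirrors cqHead ~ opt("*") ~ ":-" -- the optional "*" maps to a boolean
  def ruleHead: Parser[(String, Boolean)] =
    head ~ opt("*") <~ ":-" ^^ { case (h ~ star) => (h, star != None) }

  def main(args: Array[String]): Unit = {
    println(parseAll(ruleHead, "Q(x, y) * :-")) // parses to (Q,true)  -> SELECT DISTINCT
    println(parseAll(ruleHead, "Q(x, y) :-"))   // parses to (Q,false) -> plain SELECT
  }
}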
From e5bd6753be4af14691b3cbf04cff0dbe9e3b62f0 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Mon, 6 Jul 2015 13:04:42 -0700 Subject: [PATCH 147/347] adjust whitespaces --- src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala | 3 ++- .../scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala | 3 ++- test/expected-output-test/expressions/compile.expected | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 261f42b7b..99c8b7678 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -420,6 +420,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val headTerms = tmpCq.head.terms map { x => DeepDiveLogPrettyPrinter.printExpr(x, ss.resolveColumnVar(_, tmpCq, OriginalOnly)) } + // println(headTerms) val index = qs.getBodyIndex(stmt.supervision) val name = ss.resolveName(qs.getVar(stmt.supervision)) val labelCol = s"R${index}.${name}" @@ -450,7 +451,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { DeepDiveLogPrettyPrinter.printExpr(x, ss.resolveColumnVar(_, tmpCq, resolveColumnFlag)) } val selectStr = variableCols.mkString(", ") - val distinctStr = if (tmpCq.isDistinct) " DISTINCT " else "" + val distinctStr = if (tmpCq.isDistinct) "DISTINCT" else "" inputQueries += s""" SELECT ${distinctStr} ${selectStr} ${ ss.generateSQLBody(tmpCq) }""" diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index 67c51c557..34d17a8ca 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -73,7 +73,8 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { def printExpr(e: Expression, resolve: ColumnVariable => String) = { val resolvedVars = e.variables map (resolve(_)) - resolvedVars(0) + " " + ((e.ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString(" ") + val rest = ((e.ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString(" ") + resolvedVars(0) + (if (rest != "") " " + rest else "") } def print(cq: ConjunctiveQuery): String = { diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected index 106dad5b5..31b040301 100644 --- a/test/expected-output-test/expressions/compile.expected +++ b/test/expected-output-test/expressions/compile.expected @@ -58,9 +58,9 @@ deepdive.extraction.extractors.extraction_rule_3 { sql: """ DROP VIEW IF EXISTS Q; CREATE VIEW Q AS - SELECT DISTINCT 'test':: TEXT, 123, R0.k AS "a.R0.k" , unnest(R1.q) + SELECT DISTINCT 'test' :: TEXT, 123, R0.k AS "a.R0.k" , unnest(R1.q) FROM a R0, b R1, c R2 - WHERE R1.k = R0.k AND R2.s = R1.p|| R1.q AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR (R1.r < 20 AND R1.r > 10)) + WHERE R1.k = R0.k AND R2.s = R1.p || R1.q AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR (R1.r < 20 AND R1.r > 10)) """ style: "sql_extractor" From 3013fa187a138c2c70d58af632c637c50ae688b4 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Mon, 6 Jul 2015 20:43:30 -0700 Subject: [PATCH 148/347] Fix alias with expressions in head --- .../org/deepdive/ddlog/DeepDiveLogCompiler.scala | 13 +++++++++++-- .../org/deepdive/ddlog/DeepDiveLogParser.scala | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git 
a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 99c8b7678..02ffb4884 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -204,6 +204,15 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } } + def resolveExpr(e: Expression, cq: ConjunctiveQuery, alias: AliasStyle) = { + val resolvedVars = e.ops isEmpty match { + case true => e.variables map (resolveColumnVar(_, cq, alias)) + case false => e.variables map (resolveColumnVar(_, cq, OriginalOnly)) + } + val rest = ((e.ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString(" ") + resolvedVars(0) + (if (rest != "") " " + rest else "") + } + // This is generic code that generates the FROM with positional aliasing R0, R1, etc. // and the corresponding WHERE clause (equating all variables) def generateSQLBody(z : ConjunctiveQuery) : String = { @@ -233,7 +242,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C case _ => None } } else { // expression - val expr = DeepDiveLogPrettyPrinter.printExpr(Expression(vars, ops, relName, index), resolveColumnVar(_,z)) + val expr = resolveExpr(Expression(vars, ops, relName, index), z, OriginalOnly) val attr = schema(relName, index) Some(s"R${bodyIndex}.${attr} = ${expr}") } @@ -448,7 +457,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { case false => OriginalAndAlias } val variableCols = tmpCq.head.terms map { x => - DeepDiveLogPrettyPrinter.printExpr(x, ss.resolveColumnVar(_, tmpCq, resolveColumnFlag)) + ss.resolveExpr(x, tmpCq, resolveColumnFlag) } val selectStr = variableCols.mkString(", ") val distinctStr = if (tmpCq.isDistinct) "DISTINCT" else "" diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index 194b56e86..3ccd9d4da 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -80,7 +80,7 @@ class DeepDiveLogParser extends JavaTokenParsers { def stringLiteralAsSqlString = stringLiteral ^^ { s => s"""'${s.stripPrefix("\"").stripSuffix("\"")}'""" } - def constant = stringLiteralAsSqlString | wholeNumber | "TRUE" | "FALSE" | "TEXT" | "INT" | "BOOLEAN" + def constant = stringLiteralAsSqlString | wholeNumber | "TRUE" | "FALSE" | "NULL" | "TEXT" | "INT" | "BOOLEAN" // C/Java/Scala-style as well as shell script-style comments are supported // by treating them as whiteSpace From 6190e9bfd783229018f4cf597b7c68b3adf20c91 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Mon, 6 Jul 2015 22:06:42 -0700 Subject: [PATCH 149/347] cleanup debug printing --- src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala | 4 ---- src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala | 2 -- 2 files changed, 6 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 02ffb4884..159a143e8 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -418,7 +418,6 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { def compileExtractionRules(stmts: List[ExtractionRule], ss: CompilationState): CompiledBlocks = { var inputQueries = new ListBuffer[String]() for (stmt <- stmts) { - // 
println(DeepDiveLogPrettyPrinter.print(stmt))
       for (cqBody <- stmt.q.bodies) {
         val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody), stmt.q.conditions, stmt.q.isDistinct)
         // Generate the body of the query.
@@ -429,7 +428,6 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
         val headTerms = tmpCq.head.terms map { x =>
           DeepDiveLogPrettyPrinter.printExpr(x, ss.resolveColumnVar(_, tmpCq, OriginalOnly))
         }
-        // println(headTerms)
         val index = qs.getBodyIndex(stmt.supervision)
         val name = ss.resolveName(qs.getVar(stmt.supervision))
         val labelCol = s"R${index}.${name}"
@@ -694,8 +692,6 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
     // take an initial pass to analyze the parsed program
     val state = new CompilationState( programToCompile, config )
 
-    // programToCompile foreach {stmt => println(DeepDiveLogPrettyPrinter.print(stmt))}
-
     val body = new ListBuffer[String]()
     body ++= compileSchemaDeclarations((state.schemaDeclarationGroupByHead map (_._2)).flatten.toList, state)
     state.extractionRuleGroupByHead foreach {keyVal => body ++= compileExtractionRules(keyVal._2, state)}
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
index 3ccd9d4da..2f81d7c03 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
@@ -139,7 +139,6 @@ class DeepDiveLogParser extends JavaTokenParsers {
       case (v ~ opList) => {
         val variables = List(v) ++ (opList map (_.operand))
         val ops = opList map (_.operator)
-        // println(variables.mkString)
         Expression(variables, ops, "", 0)
       }
     }
@@ -171,7 +170,6 @@ class DeepDiveLogParser extends JavaTokenParsers {
           case Constant(x,_,_) => Constant(x,r,i)
           case InlineFunction(x, args) => InlineFunction(x, args)
         }
-        // println(Expression(vars, op, r, i))
         Expression(vars, op, r, i)
       }
     })

From fd4369aff737e1ddf7ff456a8b11ad3fa569585c Mon Sep 17 00:00:00 2001
From: Feiran Wang
Date: Tue, 7 Jul 2015 16:21:14 -0700
Subject: [PATCH 150/347] Add a semantic checker

The following checks are performed
1. if a relation is defined
2. if a function is defined
3. if the number of columns of a relation matches that in the schema
4. if variable relations use reserved columns (id, label)
---
 examples/smoke.ddl                                 |   2 +-
 .../org/deepdive/ddlog/DeepDiveLog.scala           |   2 +-
 .../deepdive/ddlog/DeepDiveLogCompiler.scala       |   2 +
 .../ddlog/DeepDiveLogSemanticChecker.scala         | 126 ++++++++++++++++++
 .../compile-incremental.expected                   |   8 +-
 .../compile-materialization.expected               |   4 +-
 .../smoke_example/compile.expected                 |   4 +-
 .../smoke_example/print-incremental.expected       |   4 +-
 .../smoke_example/print.expected                   |   2 +-
 9 files changed, 141 insertions(+), 13 deletions(-)
 create mode 100644 src/main/scala/org/deepdive/ddlog/DeepDiveLogSemanticChecker.scala

diff --git a/examples/smoke.ddl b/examples/smoke.ddl
index 26f28fd37..c3aa5cdd4 100644
--- a/examples/smoke.ddl
+++ b/examples/smoke.ddl
@@ -35,5 +35,5 @@ cancer(pid) :- person_has_cancer(pid, l)
   label = l.
 cancer(pid) :- smoke(pid), person_smokes(pid, l)
   weight = 0.5.
-smoke(pid) :- smoke(pid1), friend(pid1, pid)
+smoke(pid) :- smoke(pid1), friends(pid1, pid)
   weight = 0.4.
\ No newline at end of file diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLog.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLog.scala index eecb7f9a7..db5ad3101 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLog.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLog.scala @@ -26,6 +26,7 @@ object DeepDiveLog { head("ddlogc", "0.0.1") cmd("compile") required() action { (_, c) => c.copy(handler = DeepDiveLogCompiler) } cmd("print") required() action { (_, c) => c.copy(handler = DeepDiveLogPrettyPrinter) } + cmd("check") required() action { (_, c) => c.copy(handler = DeepDiveLogSemanticChecker) } opt[Unit]('i', "incremental") optional() action { (_, c) => c.copy(mode = INCREMENTAL) } text("Whether to derive delta rules") opt[Unit]("materialization") optional() action { (_, c) => c.copy(mode = MATERIALIZATION) } text("Whether to materialize origin data") opt[Unit]("merge") optional() action { (_, c) => c.copy(mode = MERGE) } text("Whether to merge delta data") @@ -53,7 +54,6 @@ trait DeepDiveLogHandler { def run(config: DeepDiveLog.Config): Unit = try { // parse each file into a single program val parsedProgram = parseFiles(config.inputFiles) - // run handler with the parsed program run(parsedProgram, config) } catch { diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 159a143e8..9666a20a8 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -680,6 +680,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { // entry point for compilation override def run(parsedProgram: DeepDiveLog.Program, config: DeepDiveLog.Config) = { + // semantic checking + DeepDiveLogSemanticChecker.run(parsedProgram, config) // determine the program to compile val programToCompile = // derive and compile the program based on mode information diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogSemanticChecker.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogSemanticChecker.scala new file mode 100644 index 000000000..7468979dc --- /dev/null +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogSemanticChecker.scala @@ -0,0 +1,126 @@ +package org.deepdive.ddlog + +import scala.collection.immutable.HashMap +import scala.collection.immutable.HashSet + +// semantic checker for ddlog +object DeepDiveLogSemanticChecker extends DeepDiveLogHandler { + + // initialize the checker + def init(program: DeepDiveLog.Program) { + program foreach { + case s: SchemaDeclaration => { + heads += s.a.name + schemaDeclaration += { s.a.name -> s } + } + case ExtractionRule(q, supervision) => { + heads += q.head.name + } + case InferenceRule(q, weight, semantic, mode) => { + heads += q.head.name + } + case f: FunctionDeclaration => { + functionDeclaration += { f.functionName -> f } + } + case FunctionCallRule(input, output, function) => { + heads += output + } + } + } + + // check a statement + def check(stmt: Statement) { + checkRelationDefined(stmt) + checkFunctionDefined(stmt) + checkVariableRelationSchema(stmt) + checkNumberOfColumns(stmt) + } + + // check if relations in the body are defined + def checkRelationDefined(stmt: Statement) { + val stmtStr = DeepDiveLogPrettyPrinter.print(stmt) + def checkRelation(name: String) { + if (!(heads contains name)) + error(stmt, s"""relation "${name}" is not defined""") + } + stmt match { + case s: ExtractionRule => { + s.q.bodies foreach { x => + x foreach { a => checkRelation(a.name) } + 
} + } + case s: InferenceRule => { + s.q.bodies foreach { x => + x foreach { a => checkRelation(a.name) } + } + } + case s: FunctionCallRule => checkRelation(s.input) + case _ => + } + } + + // check if a function is defined when it's called + def checkFunctionDefined(stmt: Statement) { + stmt match { + case s: FunctionCallRule => { + if (!(functionDeclaration.keySet contains s.function)) + error(stmt, s"""function "${s.function}" is not defined""") + } + case _ => + } + } + + // check if the user use reserved column names + def checkVariableRelationSchema(stmt: Statement) { + val reservedSet = Set("id", "label") + stmt match { + case SchemaDeclaration(Attribute(r, terms, types), isQuery, vType) => { + if (isQuery) { + terms.foreach { x => + if (reservedSet contains x.varName) + error(stmt, s"""variable relation contains reserved column "${x.varName}" """) + } + } + } + case _ => + } + } + + // check if the number of columns match schema declaration + def checkNumberOfColumns(stmt: Statement) { + def checkAtom(a: Atom) { + if ((schemaDeclaration.keySet contains a.name) && + (a.terms.size != schemaDeclaration(a.name).a.terms.size)) + error(stmt, s""""${a.name}": number of columns in the query does not match number of columns in the schema""") + } + def checkCq(cq: ConjunctiveQuery) { + checkAtom(cq.head) + cq.bodies foreach { x => x foreach checkAtom } + } + stmt match { + case s: ExtractionRule => checkCq(s.q) + case s: InferenceRule => checkCq(s.q) + case _ => + } + } + + // throw exception + def error(stmt: Statement, message: String) { + val stmtStr = DeepDiveLogPrettyPrinter.print(stmt) + throw new RuntimeException(message + s"\n${stmtStr}") + } + + // run the checker + override def run(program: DeepDiveLog.Program, config: DeepDiveLog.Config) = { + init(program) + program foreach check + } + + // schema declaration + var heads : Set[String] = new HashSet[String]() + // schema + var schemaDeclaration : Map[String, SchemaDeclaration] = new HashMap[String, SchemaDeclaration]() + // function declaration + var functionDeclaration : Map[String, FunctionDeclaration] = new HashMap[String, FunctionDeclaration]() + +} \ No newline at end of file diff --git a/test/expected-output-test/smoke_example/compile-incremental.expected b/test/expected-output-test/smoke_example/compile-incremental.expected index 042e044f4..7853ad847 100644 --- a/test/expected-output-test/smoke_example/compile-incremental.expected +++ b/test/expected-output-test/smoke_example/compile-incremental.expected @@ -266,11 +266,11 @@ dd_delta_smoke.label: Boolean deepdive.inference.factors.dd_new_smoke_1 { input_query: """ SELECT R0.id AS "dd_new_smoke.R0.id" , R1.id AS "dd_delta_smoke.R1.id" - FROM dd_new_smoke R0, dd_delta_smoke R1, friend R2 - WHERE R2.pid1 = R1.person_id AND R2.pid = R0.person_id UNION ALL + FROM dd_new_smoke R0, dd_delta_smoke R1, friends R2 + WHERE R2.person_id = R1.person_id AND R2.friend_id = R0.person_id UNION ALL SELECT R0.id AS "dd_new_smoke.R0.id" , R1.id AS "dd_new_smoke.R1.id" - FROM dd_new_smoke R0, dd_new_smoke R1, dd_delta_friend R2 - WHERE R2.pid1 = R1.person_id AND R2.pid = R0.person_id """ + FROM dd_new_smoke R0, dd_new_smoke R1, dd_delta_friends R2 + WHERE R2.person_id = R1.person_id AND R2.friend_id = R0.person_id """ function: "Imply(dd_delta_smoke.R1.label, dd_new_smoke.R0.label)" weight: "0.4" } diff --git a/test/expected-output-test/smoke_example/compile-materialization.expected b/test/expected-output-test/smoke_example/compile-materialization.expected index 295dfd0a7..47ab03e5d 100644 
--- a/test/expected-output-test/smoke_example/compile-materialization.expected +++ b/test/expected-output-test/smoke_example/compile-materialization.expected @@ -123,8 +123,8 @@ cancer.label: Boolean deepdive.inference.factors.smoke_1 { input_query: """ SELECT R0.id AS "smoke.R0.id" , R1.id AS "smoke.R1.id" - FROM smoke R0, smoke R1, friend R2 - WHERE R2.pid1 = R1.person_id AND R2.pid = R0.person_id """ + FROM smoke R0, smoke R1, friends R2 + WHERE R2.person_id = R1.person_id AND R2.friend_id = R0.person_id """ function: "Imply(smoke.R1.label, smoke.R0.label)" weight: "0.4" } diff --git a/test/expected-output-test/smoke_example/compile.expected b/test/expected-output-test/smoke_example/compile.expected index 60b6e3112..8a52c5346 100644 --- a/test/expected-output-test/smoke_example/compile.expected +++ b/test/expected-output-test/smoke_example/compile.expected @@ -123,8 +123,8 @@ cancer.label: Boolean deepdive.inference.factors.smoke_1 { input_query: """ SELECT R0.id AS "smoke.R0.id" , R1.id AS "smoke.R1.id" - FROM smoke R0, smoke R1, friend R2 - WHERE R2.pid1 = R1.person_id AND R2.pid = R0.person_id """ + FROM smoke R0, smoke R1, friends R2 + WHERE R2.person_id = R1.person_id AND R2.friend_id = R0.person_id """ function: "Imply(smoke.R1.label, smoke.R0.label)" weight: "0.4" } diff --git a/test/expected-output-test/smoke_example/print-incremental.expected b/test/expected-output-test/smoke_example/print-incremental.expected index 7a52fb60d..3ed640bfc 100644 --- a/test/expected-output-test/smoke_example/print-incremental.expected +++ b/test/expected-output-test/smoke_example/print-incremental.expected @@ -88,9 +88,9 @@ dd_new_cancer(pid) :- dd_new_smoke(pid) :- dd_delta_smoke(pid1), - friend(pid1, pid); + friends(pid1, pid); dd_new_smoke(pid1), - dd_delta_friend(pid1, pid) + dd_delta_friends(pid1, pid) weight = 0.4 semantics = Imply. diff --git a/test/expected-output-test/smoke_example/print.expected b/test/expected-output-test/smoke_example/print.expected index bef8589bc..eccbb0b6f 100644 --- a/test/expected-output-test/smoke_example/print.expected +++ b/test/expected-output-test/smoke_example/print.expected @@ -30,7 +30,7 @@ cancer(pid) :- smoke(pid) :- smoke(pid1), - friend(pid1, pid) + friends(pid1, pid) weight = 0.4 semantics = Imply. 
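
The semantic checker above runs ahead of every compile and is also exposed directly through the new "check" command, so a malformed program now fails fast with the offending statement pretty-printed after the error message. As a sketch of check 1, assume an input.ddl in the shape of the undefined-relation fixture added by a later patch (relation d is deliberately never declared):

    a(k int).
    Q(y, z) :- a(x), d(x, y, z, w).

    $ ddlog compile input.ddl
    [error] relation "d" is not defined
    Q(y, z) :-
        a(x),
        d(x, y, z, w).

The other three checks report through the same RuntimeException path, e.g. [error] function "ext" is not defined, or [error] variable relation contains reserved column "id" for a variable relation that declares an id column.
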
From dc57357885723196a9c5017ec52589cf902cbe69 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Tue, 7 Jul 2015 17:37:24 -0700 Subject: [PATCH 151/347] Add support for aggregation --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 28 ++++++++++++++++--- .../deepdive/ddlog/DeepDiveLogParser.scala | 18 ++++++------ .../ddlog/DeepDiveLogPrettyPrinter.scala | 4 +-- .../expressions/compile.expected | 6 +++- .../expressions/input.ddl | 4 ++- .../expressions/print.expected | 6 +++- 6 files changed, 49 insertions(+), 17 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 9666a20a8..9eacbb297 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -196,7 +196,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } } v match { - case InlineFunction(name, args) => { + case InlineFunction(name, args, _) => { val resolvedArgs = args map (resolveVarOrConst(_, OriginalOnly)) s"${name}(${resolvedArgs.mkString(", ")})" } @@ -265,10 +265,30 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C val conditions = conditionList flatMap ( v => if (v != "") Some(s"(${v})") else None) val conditionStr = conditions.mkString(" OR ") - + + // handle group by + // map head terms, leaving out aggregation functions + val groupbyTerms = z.head.terms flatMap { case Expression(vars, ops, relName, index) => + if (ops isEmpty) { + vars(0) match { + case InlineFunction(name, args, a) => if (a) None else Some("") + case Variable(v,r,i) => resolveColumn(v, qs, z, OriginalOnly) + case _ => Some("") + } + } else { + Some("") + } + } + + val groupbyStr = if (groupbyTerms.size == z.head.terms.size) { + "" + } else { + s"\n GROUP BY ${groupbyTerms.mkString(", ")}" + } + var whereClauseStr = whereClause match { - case Nil => if (conditionStr == "") "" else s"WHERE ${conditionStr}" - case _ => s"""WHERE ${whereClause.mkString(" AND ")} ${if (conditionStr == "") "" else s" AND (${conditionStr})"}""" + case Nil => if (conditionStr == "") "" else s"WHERE ${conditionStr}${groupbyStr}" + case _ => s"""WHERE ${whereClause.mkString(" AND ")} ${if (conditionStr == "") "" else s" AND (${conditionStr})"}${groupbyStr}""" } s"""FROM ${ bodyNames } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index 2f81d7c03..c0873cff4 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -16,7 +16,7 @@ import scala.util.Try sealed trait ColumnVariable case class Variable(varName : String, relName : String, index : Int ) extends ColumnVariable case class Constant(value : String, relName: String, index: Int) extends ColumnVariable -case class InlineFunction(functionName: String, args: List[ColumnVariable]) extends ColumnVariable +case class InlineFunction(functionName: String, args: List[ColumnVariable], isAggregation: Boolean) extends ColumnVariable case class Expression(variables: List[ColumnVariable], ops: List[String], relName: String, index: Int) case class Operator(operator: String, operand: ColumnVariable) @@ -108,8 +108,8 @@ class DeepDiveLogParser extends JavaTokenParsers { def dataType = CategoricalParser | BooleanParser def schemaDeclaration: Parser[SchemaDeclaration] = - relationName ~ opt("?") ~ opt("!") ~ "(" ~ rep1sep(columnDeclaration, 
",") ~ ")" ~ opt(dataType) ^^ { - case (r ~ isQuery ~ isDistinct ~ "(" ~ attrs ~ ")" ~ vType) => { + relationName ~ opt("?") ~ "(" ~ rep1sep(columnDeclaration, ",") ~ ")" ~ opt(dataType) ^^ { + case (r ~ isQuery ~ "(" ~ attrs ~ ")" ~ vType) => { val vars = attrs.zipWithIndex map { case(x, i) => Variable(x.name, r, i) } var types = attrs map { case(x) => x.t } val variableType = vType match { @@ -126,12 +126,14 @@ class DeepDiveLogParser extends JavaTokenParsers { def variable = variableName ^^ { Variable(_, "", 0) } def columnConstant = constant ^^ { Constant(_, "", 0) } def variableOrConstant = columnConstant | variable - def inlineFunction = "!" ~> functionName ~ "(" ~ rep1sep(variableOrConstant, ",") ~ ")" ^^ { + val aggregationFunctions = Set("MAX", "SUM", "MIN") + def inlineFunction = functionName ~ "(" ~ rep1sep(variableOrConstant, ",") ~ ")" ^^ { case (name ~ _ ~ args ~ _) => { - InlineFunction(name, args) + if (aggregationFunctions contains name) InlineFunction(name, args, true) + else InlineFunction(name, args, false) } } - def columnVariable = columnConstant | variable | inlineFunction + def columnVariable = columnConstant | inlineFunction | variable def operateOn = operator ~ columnVariable ^^ { case (v ~ o) => Operator(v,o) } def typecast = castOp ~ columnConstant ^^ { case (v ~ o) => Operator(v,o) } def operatorAndOperand = operateOn | typecast @@ -151,7 +153,7 @@ class DeepDiveLogParser extends JavaTokenParsers { val vars = v map { case Variable(x,_,_) => Variable(x,r,i) case Constant(x,_,_) => Constant(x,r,i) - case InlineFunction(x, args) => InlineFunction(x, args) + case InlineFunction(x, args, a) => InlineFunction(x, args, a) } Expression(vars, op, r, i) } @@ -168,7 +170,7 @@ class DeepDiveLogParser extends JavaTokenParsers { val vars = v map { case Variable(x,_,_) => Variable(x,r,i) case Constant(x,_,_) => Constant(x,r,i) - case InlineFunction(x, args) => InlineFunction(x, args) + case InlineFunction(x, args, _) => InlineFunction(x, args, false) } Expression(vars, op, r, i) } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index 34d17a8ca..fe04a460c 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -63,9 +63,9 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { def printColumnVar(x: ColumnVariable) = { x match { - case InlineFunction(name, args) => { + case InlineFunction(name, args, _) => { val resolvedArgs = args map printVarOrConst - s"!${name}(${resolvedArgs.mkString(", ")})" + s"${name}(${resolvedArgs.mkString(", ")})" } case _ => printVarOrConst(x) } diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected index 31b040301..9ff5adfe6 100644 --- a/test/expected-output-test/expressions/compile.expected +++ b/test/expected-output-test/expressions/compile.expected @@ -60,7 +60,11 @@ CREATE VIEW Q AS SELECT DISTINCT 'test' :: TEXT, 123, R0.k AS "a.R0.k" , unnest(R1.q) FROM a R0, b R1, c R2 - WHERE R1.k = R0.k AND R2.s = R1.p || R1.q AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR (R1.r < 20 AND R1.r > 10)) + WHERE R1.k = R0.k AND R2.s = R1.p || R1.q AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR (R1.r < 20 AND R1.r > 10)) UNION ALL + SELECT R1.p AS "b.R1.p" , R1.q AS "b.R1.q" , MAX(R1.r) + FROM a R0, b R1 + WHERE R1.k = R0.k + GROUP BY R1.p, R1.q """ style: "sql_extractor" diff --git 
a/test/expected-output-test/expressions/input.ddl b/test/expected-output-test/expressions/input.ddl
index cf6827dc5..0795e99bb 100644
--- a/test/expected-output-test/expressions/input.ddl
+++ b/test/expected-output-test/expressions/input.ddl
@@ -2,4 +2,6 @@ a(k int).
 b(k int, p text, q text, r int).
 c(s text, n int, t text).
 
-Q("test" :: TEXT, 123, id, !unnest(y)) * :- a(id), b(id, x,y,z), c(x || y,10,"foo"), [z>100; z < 20, z > 10].
\ No newline at end of file
+Q("test" :: TEXT, 123, id, unnest(y)) * :- a(id), b(id, x,y,z), c(x || y,10,"foo"), [z>100; z < 20, z > 10].
+
+Q(y, z, MAX(w)) :- a(x), b(x,y,z,w).
\ No newline at end of file
diff --git a/test/expected-output-test/expressions/print.expected b/test/expected-output-test/expressions/print.expected
index b37bf8a4b..f6312d0b2 100644
--- a/test/expected-output-test/expressions/print.expected
+++ b/test/expected-output-test/expressions/print.expected
@@ -9,8 +9,12 @@ c(s text,
   n int,
   t text).
 
-Q( "test" :: TEXT, 123, id, !unnest(y)) * :-
+Q( "test" :: TEXT, 123, id, unnest(y)) * :-
   a(id),
   b(id, x, y, z),
   c(x || y, 10, "foo" ),
   [ z > 100; z < 20, z > 10 ].
 
+Q(y, z, MAX(w)) :-
+  a(x),
+  b(x, y, z, w).
+

From 68ba0999f254e24f217753e6699896ba9656d491 Mon Sep 17 00:00:00 2001
From: Feiran Wang
Date: Tue, 7 Jul 2015 21:25:41 -0700
Subject: [PATCH 152/347] Add test for semantic checker

---
 test/compile-error-test.bats.template              | 21 +++++++++++++++++++
 .../column_dismatch/compile-error.expected         |  5 +++++
 .../column_dismatch/input.ddl                      |  5 +++++
 .../compile-error.expected                         |  6 ++++++
 .../invalid_variable_schema/input.ddl              |  5 +++++
 .../undefined_function/compile-error.expected      |  3 +++
 .../undefined_function/input.ddl                   |  5 +++++
 .../undefined_relation/compile-error.expected      |  5 +++++
 .../undefined_relation/input.ddl                   |  5 +++++
 9 files changed, 60 insertions(+)
 create mode 100644 test/compile-error-test.bats.template
 create mode 100644 test/compile-error-test/column_dismatch/compile-error.expected
 create mode 100644 test/compile-error-test/column_dismatch/input.ddl
 create mode 100644 test/compile-error-test/invalid_variable_schema/compile-error.expected
 create mode 100644 test/compile-error-test/invalid_variable_schema/input.ddl
 create mode 100644 test/compile-error-test/undefined_function/compile-error.expected
 create mode 100644 test/compile-error-test/undefined_function/input.ddl
 create mode 100644 test/compile-error-test/undefined_relation/compile-error.expected
 create mode 100644 test/compile-error-test/undefined_relation/input.ddl

diff --git a/test/compile-error-test.bats.template b/test/compile-error-test.bats.template
new file mode 100644
index 000000000..f9af002cc
--- /dev/null
+++ b/test/compile-error-test.bats.template
@@ -0,0 +1,21 @@
+#!/usr/bin/env bats
+# Compile error test
+#
+# The test case here feeds a malformed .ddl into ddlog's compile command and checks that it produces the expected error.
+
+source bats-template.bash # for $TESTDIR, $it, etc.
+
+# some preconditions
+setup() {
+    [ -e "$TESTDIR" ]
+    expectedError="$TESTDIR"/compile-error.expected
+    [ -e "$expectedError" ]
+    actualError=${expectedError%.expected}.actual
+}
+
+# check if input produces a compile error
+@test "$it compiles input" {
+    ! 
ddlog compile "$TESTDIR"/input.ddl >/dev/null 2>"$actualError" + diff "$expectedError" "$actualError" +} + diff --git a/test/compile-error-test/column_dismatch/compile-error.expected b/test/compile-error-test/column_dismatch/compile-error.expected new file mode 100644 index 000000000..06dd1101f --- /dev/null +++ b/test/compile-error-test/column_dismatch/compile-error.expected @@ -0,0 +1,5 @@ +[error] "c": number of columns in the query does not match number of columns in the schema +Q(y, z) :- + a(x), + c(x, y, z, w). + diff --git a/test/compile-error-test/column_dismatch/input.ddl b/test/compile-error-test/column_dismatch/input.ddl new file mode 100644 index 000000000..cf2b01ec4 --- /dev/null +++ b/test/compile-error-test/column_dismatch/input.ddl @@ -0,0 +1,5 @@ +a(k int). +b(k int, p text, q text, r int). +c(s text, n int, t text). + +Q(y, z) :- a(x), c(x,y,z,w). \ No newline at end of file diff --git a/test/compile-error-test/invalid_variable_schema/compile-error.expected b/test/compile-error-test/invalid_variable_schema/compile-error.expected new file mode 100644 index 000000000..6086a2b35 --- /dev/null +++ b/test/compile-error-test/invalid_variable_schema/compile-error.expected @@ -0,0 +1,6 @@ +[error] variable relation contains reserved column "id" +c?(id int, + s text, + n int, + t text). + diff --git a/test/compile-error-test/invalid_variable_schema/input.ddl b/test/compile-error-test/invalid_variable_schema/input.ddl new file mode 100644 index 000000000..d9254a628 --- /dev/null +++ b/test/compile-error-test/invalid_variable_schema/input.ddl @@ -0,0 +1,5 @@ +a(k int). +b(k int, p text, q text, r int). +c?(id int, s text, n int, t text). + +Q(y, z) :- a(x), d(x,y,z,w). \ No newline at end of file diff --git a/test/compile-error-test/undefined_function/compile-error.expected b/test/compile-error-test/undefined_function/compile-error.expected new file mode 100644 index 000000000..90ffa0742 --- /dev/null +++ b/test/compile-error-test/undefined_function/compile-error.expected @@ -0,0 +1,3 @@ +[error] function "ext" is not defined +Q :- !ext(a). + diff --git a/test/compile-error-test/undefined_function/input.ddl b/test/compile-error-test/undefined_function/input.ddl new file mode 100644 index 000000000..27a2a045a --- /dev/null +++ b/test/compile-error-test/undefined_function/input.ddl @@ -0,0 +1,5 @@ +a(k int). +b(k int, p text, q text, r int). +c(s text, n int, t text). + +Q :- !ext(a). \ No newline at end of file diff --git a/test/compile-error-test/undefined_relation/compile-error.expected b/test/compile-error-test/undefined_relation/compile-error.expected new file mode 100644 index 000000000..b8e721de4 --- /dev/null +++ b/test/compile-error-test/undefined_relation/compile-error.expected @@ -0,0 +1,5 @@ +[error] relation "d" is not defined +Q(y, z) :- + a(x), + d(x, y, z, w). + diff --git a/test/compile-error-test/undefined_relation/input.ddl b/test/compile-error-test/undefined_relation/input.ddl new file mode 100644 index 000000000..dde018a26 --- /dev/null +++ b/test/compile-error-test/undefined_relation/input.ddl @@ -0,0 +1,5 @@ +a(k int). +b(k int, p text, q text, r int). +c(s text, n int, t text). + +Q(y, z) :- a(x), d(x,y,z,w). 
\ No newline at end of file From b5beb82bd7b9d7755d1be3c7258ff3b0e00a8ad4 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Tue, 7 Jul 2015 23:31:06 -0700 Subject: [PATCH 153/347] add endtoend and cleanup pipeline --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 10 ++-- .../chunking_example/compile.expected | 8 +-- .../expressions/compile.expected | 2 + .../ocr_example/compile-incremental.expected | 17 +++--- .../compile-materialization.expected | 5 +- .../ocr_example/compile-merge.expected | 7 +-- .../ocr_example/compile.expected | 6 ++- .../compile-incremental.expected | 25 ++++----- .../compile-materialization.expected | 5 +- .../smoke_example/compile-merge.expected | 9 ++-- .../smoke_example/compile.expected | 14 ++--- .../compile-incremental.expected | 51 +++++++++--------- .../compile-materialization.expected | 13 ++--- .../spouse_example/compile-merge.expected | 11 ++-- .../spouse_example/compile.expected | 14 ++--- .../compile-incremental.expected | 53 ++++++++++--------- .../compile-incremental.expected | 53 ++++++++++--------- .../compile.expected | 16 +++--- 18 files changed, 173 insertions(+), 146 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 9eacbb297..38470eea4 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -420,7 +420,9 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { } } // Cleanup incremental table extractor - val truncateTableList = (stmts map (x => if ((x.a.name startsWith "dd_new_") && (ss.inferenceRuleGroupByHead contains x.a.name)) "" else s"TRUNCATE ${x.a.name};")).filter(_ != "") + val truncateTableList = (stmts map (x => + if ((x.a.name startsWith "dd_new_") && (ss.inferenceRuleGroupByHead contains x.a.name)) "" + else s"TRUNCATE ${x.a.name};")).filter(_ != "") if (truncateTableList.length > 0) { schemas += s""" deepdive.extraction.extractors.cleanup { @@ -664,15 +666,17 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val extraction_pipeline = if (extraction.length > 0) s"deepdive.pipeline.pipelines.extraction: [${extraction}]" else "" val inference = ((ss.inferenceRuleGroupByHead map (_._2)).flatten map {s => ss.resolveInferenceBlockName(s)}).mkString(", ") val inference_pipeline = if (inference.length > 0) s"deepdive.pipeline.pipelines.inference: [${inference}]" else "" + val endtoend = List(extraction, inference).filter(_ != "").mkString(", ") + val endtoend_pipeline = if (endtoend.length > 0) s"deepdive.pipeline.pipelines.endtoend: [${endtoend}]" else "" val cleanup_pipeline = ss.mode match { - case INCREMENTAL => if (setup_database_pipeline.length > 0) s"deepdive.pipeline.pipelines.cleanup: [cleanup]" else "" + case INCREMENTAL | ORIGINAL => if (setup_database_pipeline.length > 0) s"deepdive.pipeline.pipelines.cleanup: [cleanup]" else "" case _ => "" } val base_dir = ss.mode match { case MATERIALIZATION | INCREMENTAL => "deepdive.pipeline.base_dir: ${BASEDIR}" case _ => "" } - List(run, initdb, extraction_pipeline, inference_pipeline, cleanup_pipeline, base_dir).filter(_ != "") + List(run, initdb, extraction_pipeline, inference_pipeline, endtoend_pipeline, cleanup_pipeline, base_dir).filter(_ != "") } // generate variable schema statements diff --git a/test/expected-output-test/chunking_example/compile.expected b/test/expected-output-test/chunking_example/compile.expected index 1288f1873..2afd5affe 100644 --- 
a/test/expected-output-test/chunking_example/compile.expected +++ b/test/expected-output-test/chunking_example/compile.expected @@ -72,9 +72,9 @@ deepdive.extraction.extractors.extraction_rule_6 { sql: """ DROP VIEW IF EXISTS ext_features_input; CREATE VIEW ext_features_input AS - SELECT R0.word_id AS "words.R0.word_id" , R0.word AS "words.R0.word" , R0.pos AS "words.R0.pos" , R1.word AS "words.R1.word" , R1.pos AS "words.R1.pos" + SELECT R0.word_id AS "words.R0.word_id" , R0.word AS "words.R0.word" , R0.pos AS "words.R0.pos" , R1.word AS "words.R1.word" , R1.pos AS "words.R1.pos" FROM words R0, words R1 - WHERE R1.sent_id = R0.sent_id + WHERE R1.sent_id = R0.sent_id """ style: "sql_extractor" dependencies: [ "extraction_rule_5" ] @@ -117,7 +117,7 @@ input_query: """ SELECT R0.id AS "tag.R0.id" , R1.feature AS "word_features.R1.feature" FROM tag R0, word_features R1 - WHERE R1.word_id = R0.word_id """ + WHERE R1.word_id = R0.word_id """ function: "Multinomial(tag.R0.label)" weight: "?(word_features.R1.feature)" } @@ -126,3 +126,5 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_0, extraction_rule_1, extraction_rule_3, extraction_rule_2] deepdive.pipeline.pipelines.extraction: [extraction_rule_6, extraction_rule_9, extraction_rule_8, extraction_rule_5] deepdive.pipeline.pipelines.inference: [tag_0] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_6, extraction_rule_9, extraction_rule_8, extraction_rule_5, tag_0] +deepdive.pipeline.pipelines.cleanup: [cleanup] diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected index 9ff5adfe6..88d756923 100644 --- a/test/expected-output-test/expressions/compile.expected +++ b/test/expected-output-test/expressions/compile.expected @@ -73,3 +73,5 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2] deepdive.pipeline.pipelines.extraction: [extraction_rule_3] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_3] +deepdive.pipeline.pipelines.cleanup: [cleanup] diff --git a/test/expected-output-test/ocr_example/compile-incremental.expected b/test/expected-output-test/ocr_example/compile-incremental.expected index c7da34686..1939614bf 100644 --- a/test/expected-output-test/ocr_example/compile-incremental.expected +++ b/test/expected-output-test/ocr_example/compile-incremental.expected @@ -123,10 +123,10 @@ q1.label: Boolean deepdive.extraction.extractors.extraction_rule_11 { sql: """ TRUNCATE dd_new_label2; INSERT INTO dd_new_label2 - SELECT R0.wid, R0.val + SELECT R0.wid, R0.val FROM label2 R0 UNION ALL - SELECT R0.wid, R0.val + SELECT R0.wid, R0.val FROM dd_delta_label2 R0 """ @@ -153,10 +153,10 @@ q1.label: Boolean deepdive.extraction.extractors.extraction_rule_7 { sql: """ TRUNCATE dd_new_label1; INSERT INTO dd_new_label1 - SELECT R0.wid, R0.val + SELECT R0.wid, R0.val FROM label1 R0 UNION ALL - SELECT R0.wid, R0.val + SELECT R0.wid, R0.val FROM dd_delta_label1 R0 """ @@ -180,10 +180,10 @@ q1.label: Boolean deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE dd_new_features; INSERT INTO dd_new_features - SELECT R0.id, R0.word_id, R0.feature_id, R0.feature_val + SELECT R0.id, R0.word_id, R0.feature_id, R0.feature_val FROM features R0 UNION ALL - SELECT R0.id, R0.word_id, R0.feature_id, R0.feature_val + SELECT R0.id, R0.word_id, R0.feature_id, R0.feature_val FROM dd_delta_features R0 """ @@ -223,7 +223,7 @@ q1.label: Boolean input_query: """ 
SELECT R0.id AS "dd_new_q2.R0.id" , R1.feature_id AS "dd_delta_features.R1.feature_id" FROM dd_new_q2 R0, dd_delta_features R1 - WHERE R1.word_id = R0.wid """ + WHERE R1.word_id = R0.wid """ function: "Imply(dd_new_q2.R0.label)" weight: "?(dd_delta_features.R1.feature_id)" } @@ -233,7 +233,7 @@ q1.label: Boolean input_query: """ SELECT R0.id AS "dd_new_q1.R0.id" , R1.feature_id AS "dd_delta_features.R1.feature_id" FROM dd_new_q1 R0, dd_delta_features R1 - WHERE R1.word_id = R0.wid """ + WHERE R1.word_id = R0.wid """ function: "Imply(dd_new_q1.R0.label)" weight: "?(dd_delta_features.R1.feature_id)" } @@ -242,5 +242,6 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_10, extraction_rule_5, extraction_rule_14, extraction_rule_6, extraction_rule_1, extraction_rule_17, extraction_rule_9, extraction_rule_2, extraction_rule_13, extraction_rule_18] deepdive.pipeline.pipelines.extraction: [extraction_rule_21, extraction_rule_7, extraction_rule_20, extraction_rule_19, extraction_rule_3, extraction_rule_11, extraction_rule_15] deepdive.pipeline.pipelines.inference: [dd_new_q2_0, dd_new_q1_1] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_21, extraction_rule_7, extraction_rule_20, extraction_rule_19, extraction_rule_3, extraction_rule_11, extraction_rule_15, dd_new_q2_0, dd_new_q1_1] deepdive.pipeline.pipelines.cleanup: [cleanup] deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/ocr_example/compile-materialization.expected b/test/expected-output-test/ocr_example/compile-materialization.expected index 1fc5c659c..79b41870d 100644 --- a/test/expected-output-test/ocr_example/compile-materialization.expected +++ b/test/expected-output-test/ocr_example/compile-materialization.expected @@ -106,7 +106,7 @@ q2.label: Boolean input_query: """ SELECT R0.id AS "q1.R0.id" , R1.feature_id AS "features.R1.feature_id" FROM q1 R0, features R1 - WHERE R1.word_id = R0.wid """ + WHERE R1.word_id = R0.wid """ function: "Imply(q1.R0.label)" weight: "?(features.R1.feature_id)" } @@ -116,7 +116,7 @@ q2.label: Boolean input_query: """ SELECT R0.id AS "q2.R0.id" , R1.feature_id AS "features.R1.feature_id" FROM q2 R0, features R1 - WHERE R1.word_id = R0.wid """ + WHERE R1.word_id = R0.wid """ function: "Imply(q2.R0.label)" weight: "?(features.R1.feature_id)" } @@ -125,4 +125,5 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2, extraction_rule_3, extraction_rule_4] deepdive.pipeline.pipelines.extraction: [extraction_rule_5, extraction_rule_6] deepdive.pipeline.pipelines.inference: [q1_0, q2_1] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_5, extraction_rule_6, q1_0, q2_1] deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/ocr_example/compile-merge.expected b/test/expected-output-test/ocr_example/compile-merge.expected index d45ec8193..a6ffd7d42 100644 --- a/test/expected-output-test/ocr_example/compile-merge.expected +++ b/test/expected-output-test/ocr_example/compile-merge.expected @@ -20,7 +20,7 @@ deepdive.extraction.extractors.extraction_rule_2 { sql: """ TRUNCATE label2; INSERT INTO label2 - SELECT R0.wid, R0.val + SELECT R0.wid, R0.val FROM dd_new_label2 R0 """ @@ -32,7 +32,7 @@ deepdive.extraction.extractors.extraction_rule_1 { sql: """ TRUNCATE label1; INSERT INTO label1 - SELECT R0.wid, R0.val + SELECT R0.wid, R0.val FROM dd_new_label1 R0 """ @@ -44,7 +44,7 @@ deepdive.extraction.extractors.extraction_rule_0 { sql: """ TRUNCATE 
features; INSERT INTO features - SELECT R0.id, R0.word_id, R0.feature_id, R0.feature_val + SELECT R0.id, R0.word_id, R0.feature_id, R0.feature_val FROM dd_new_features R0 """ @@ -54,3 +54,4 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.extraction: [extraction_rule_2, extraction_rule_1, extraction_rule_0] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_2, extraction_rule_1, extraction_rule_0] diff --git a/test/expected-output-test/ocr_example/compile.expected b/test/expected-output-test/ocr_example/compile.expected index e41c42804..05d770f85 100644 --- a/test/expected-output-test/ocr_example/compile.expected +++ b/test/expected-output-test/ocr_example/compile.expected @@ -106,7 +106,7 @@ q2.label: Boolean input_query: """ SELECT R0.id AS "q1.R0.id" , R1.feature_id AS "features.R1.feature_id" FROM q1 R0, features R1 - WHERE R1.word_id = R0.wid """ + WHERE R1.word_id = R0.wid """ function: "Imply(q1.R0.label)" weight: "?(features.R1.feature_id)" } @@ -116,7 +116,7 @@ q2.label: Boolean input_query: """ SELECT R0.id AS "q2.R0.id" , R1.feature_id AS "features.R1.feature_id" FROM q2 R0, features R1 - WHERE R1.word_id = R0.wid """ + WHERE R1.word_id = R0.wid """ function: "Imply(q2.R0.label)" weight: "?(features.R1.feature_id)" } @@ -125,3 +125,5 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2, extraction_rule_3, extraction_rule_4] deepdive.pipeline.pipelines.extraction: [extraction_rule_5, extraction_rule_6] deepdive.pipeline.pipelines.inference: [q1_0, q2_1] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_5, extraction_rule_6, q1_0, q2_1] +deepdive.pipeline.pipelines.cleanup: [cleanup] diff --git a/test/expected-output-test/smoke_example/compile-incremental.expected b/test/expected-output-test/smoke_example/compile-incremental.expected index 7853ad847..1e0b6a07c 100644 --- a/test/expected-output-test/smoke_example/compile-incremental.expected +++ b/test/expected-output-test/smoke_example/compile-incremental.expected @@ -151,10 +151,10 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_7 { sql: """ TRUNCATE dd_new_person_has_cancer; INSERT INTO dd_new_person_has_cancer - SELECT R0.person_id, R0.has_cancer + SELECT R0.person_id, R0.has_cancer FROM person_has_cancer R0 UNION ALL - SELECT R0.person_id, R0.has_cancer + SELECT R0.person_id, R0.has_cancer FROM dd_delta_person_has_cancer R0 """ @@ -166,10 +166,10 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE dd_new_person; INSERT INTO dd_new_person - SELECT R0.person_id, R0.name + SELECT R0.person_id, R0.name FROM person R0 UNION ALL - SELECT R0.person_id, R0.name + SELECT R0.person_id, R0.name FROM dd_delta_person R0 """ @@ -223,10 +223,10 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_11 { sql: """ TRUNCATE dd_new_person_smokes; INSERT INTO dd_new_person_smokes - SELECT R0.person_id, R0.smokes + SELECT R0.person_id, R0.smokes FROM person_smokes R0 UNION ALL - SELECT R0.person_id, R0.smokes + SELECT R0.person_id, R0.smokes FROM dd_delta_person_smokes R0 """ @@ -238,10 +238,10 @@ dd_delta_smoke.label: Boolean deepdive.extraction.extractors.extraction_rule_15 { sql: """ TRUNCATE dd_new_friends; INSERT INTO dd_new_friends - SELECT R0.person_id, R0.friend_id + SELECT R0.person_id, R0.friend_id FROM friends R0 UNION ALL - SELECT R0.person_id, R0.friend_id + SELECT R0.person_id, R0.friend_id FROM dd_delta_friends R0 """ @@ 
-254,10 +254,10 @@ dd_delta_smoke.label: Boolean input_query: """ SELECT R0.id AS "dd_new_cancer.R0.id" , R1.id AS "dd_delta_smoke.R1.id" FROM dd_new_cancer R0, dd_delta_smoke R1, person_smokes R2 - WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id UNION ALL + WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id UNION ALL SELECT R0.id AS "dd_new_cancer.R0.id" , R1.id AS "dd_new_smoke.R1.id" FROM dd_new_cancer R0, dd_new_smoke R1, dd_delta_person_smokes R2 - WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ + WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ function: "Imply(dd_delta_smoke.R1.label, dd_new_cancer.R0.label)" weight: "0.5" } @@ -267,10 +267,10 @@ dd_delta_smoke.label: Boolean input_query: """ SELECT R0.id AS "dd_new_smoke.R0.id" , R1.id AS "dd_delta_smoke.R1.id" FROM dd_new_smoke R0, dd_delta_smoke R1, friends R2 - WHERE R2.person_id = R1.person_id AND R2.friend_id = R0.person_id UNION ALL + WHERE R2.person_id = R1.person_id AND R2.friend_id = R0.person_id UNION ALL SELECT R0.id AS "dd_new_smoke.R0.id" , R1.id AS "dd_new_smoke.R1.id" FROM dd_new_smoke R0, dd_new_smoke R1, dd_delta_friends R2 - WHERE R2.person_id = R1.person_id AND R2.friend_id = R0.person_id """ + WHERE R2.person_id = R1.person_id AND R2.friend_id = R0.person_id """ function: "Imply(dd_delta_smoke.R1.label, dd_new_smoke.R0.label)" weight: "0.4" } @@ -279,5 +279,6 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_13, extraction_rule_17, extraction_rule_6, extraction_rule_1, extraction_rule_2, extraction_rule_5, extraction_rule_18, extraction_rule_21, extraction_rule_22, extraction_rule_9, extraction_rule_10, extraction_rule_14] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_25, extraction_rule_19, extraction_rule_3, extraction_rule_23, extraction_rule_11, extraction_rule_24, extraction_rule_15] deepdive.pipeline.pipelines.inference: [dd_new_cancer_0, dd_new_smoke_1] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_25, extraction_rule_19, extraction_rule_3, extraction_rule_23, extraction_rule_11, extraction_rule_24, extraction_rule_15, dd_new_cancer_0, dd_new_smoke_1] deepdive.pipeline.pipelines.cleanup: [cleanup] deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/smoke_example/compile-materialization.expected b/test/expected-output-test/smoke_example/compile-materialization.expected index 47ab03e5d..b9908b20d 100644 --- a/test/expected-output-test/smoke_example/compile-materialization.expected +++ b/test/expected-output-test/smoke_example/compile-materialization.expected @@ -114,7 +114,7 @@ cancer.label: Boolean input_query: """ SELECT R0.id AS "cancer.R0.id" , R1.id AS "smoke.R1.id" FROM cancer R0, smoke R1, person_smokes R2 - WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ + WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ function: "Imply(smoke.R1.label, cancer.R0.label)" weight: "0.5" } @@ -124,7 +124,7 @@ cancer.label: Boolean input_query: """ SELECT R0.id AS "smoke.R0.id" , R1.id AS "smoke.R1.id" FROM smoke R0, smoke R1, friends R2 - WHERE R2.person_id = R1.person_id AND R2.friend_id = R0.person_id """ + WHERE R2.person_id = R1.person_id AND R2.friend_id = R0.person_id """ function: "Imply(smoke.R1.label, smoke.R0.label)" weight: "0.4" } @@ -133,4 +133,5 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_2, extraction_rule_4, 
extraction_rule_5, extraction_rule_3, extraction_rule_1, extraction_rule_0] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_6] deepdive.pipeline.pipelines.inference: [cancer_0, smoke_1] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_6, cancer_0, smoke_1] deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/smoke_example/compile-merge.expected b/test/expected-output-test/smoke_example/compile-merge.expected index 487fc94a3..12622b100 100644 --- a/test/expected-output-test/smoke_example/compile-merge.expected +++ b/test/expected-output-test/smoke_example/compile-merge.expected @@ -20,7 +20,7 @@ deepdive.extraction.extractors.extraction_rule_2 { sql: """ TRUNCATE person_smokes; INSERT INTO person_smokes - SELECT R0.person_id, R0.smokes + SELECT R0.person_id, R0.smokes FROM dd_new_person_smokes R0 """ @@ -32,7 +32,7 @@ deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE friends; INSERT INTO friends - SELECT R0.person_id, R0.friend_id + SELECT R0.person_id, R0.friend_id FROM dd_new_friends R0 """ @@ -44,7 +44,7 @@ deepdive.extraction.extractors.extraction_rule_1 { sql: """ TRUNCATE person_has_cancer; INSERT INTO person_has_cancer - SELECT R0.person_id, R0.has_cancer + SELECT R0.person_id, R0.has_cancer FROM dd_new_person_has_cancer R0 """ @@ -56,7 +56,7 @@ deepdive.extraction.extractors.extraction_rule_0 { sql: """ TRUNCATE person; INSERT INTO person - SELECT R0.person_id, R0.name + SELECT R0.person_id, R0.name FROM dd_new_person R0 """ @@ -66,3 +66,4 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.extraction: [extraction_rule_2, extraction_rule_3, extraction_rule_1, extraction_rule_0] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_2, extraction_rule_3, extraction_rule_1, extraction_rule_0] diff --git a/test/expected-output-test/smoke_example/compile.expected b/test/expected-output-test/smoke_example/compile.expected index 8a52c5346..7f33cc14f 100644 --- a/test/expected-output-test/smoke_example/compile.expected +++ b/test/expected-output-test/smoke_example/compile.expected @@ -88,7 +88,7 @@ cancer.label: Boolean deepdive.extraction.extractors.extraction_rule_7 { sql: """ - INSERT INTO cancer SELECT DISTINCT R0.person_id, 0 as id, R0.has_cancer AS label + INSERT INTO cancer SELECT DISTINCT R0.person_id, 0 as id, R0.has_cancer AS label FROM person_has_cancer R0 @@ -100,7 +100,7 @@ cancer.label: Boolean deepdive.extraction.extractors.extraction_rule_6 { sql: """ - INSERT INTO smoke SELECT DISTINCT R0.person_id, 0 as id, R0.smokes AS label + INSERT INTO smoke SELECT DISTINCT R0.person_id, 0 as id, R0.smokes AS label FROM person_smokes R0 @@ -112,19 +112,19 @@ cancer.label: Boolean deepdive.inference.factors.cancer_0 { input_query: """ - SELECT R0.id AS "cancer.R0.id" , R1.id AS "smoke.R1.id" + SELECT R0.id AS "cancer.R0.id" , R1.id AS "smoke.R1.id" FROM cancer R0, smoke R1, person_smokes R2 - WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ + WHERE R1.person_id = R0.person_id AND R2.person_id = R0.person_id """ function: "Imply(smoke.R1.label, cancer.R0.label)" weight: "0.5" } - + deepdive.inference.factors.smoke_1 { input_query: """ SELECT R0.id AS "smoke.R0.id" , R1.id AS "smoke.R1.id" FROM smoke R0, smoke R1, friends R2 - WHERE R2.person_id = R1.person_id AND R2.friend_id = R0.person_id """ + WHERE R2.person_id = R1.person_id AND R2.friend_id = R0.person_id """ function: "Imply(smoke.R1.label, smoke.R0.label)" weight: "0.4" } @@ -133,3 +133,5 @@ 
deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_2, extraction_rule_4, extraction_rule_5, extraction_rule_3, extraction_rule_1, extraction_rule_0] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_6] deepdive.pipeline.pipelines.inference: [cancer_0, smoke_1] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_6, cancer_0, smoke_1] +deepdive.pipeline.pipelines.cleanup: [cleanup] diff --git a/test/expected-output-test/spouse_example/compile-incremental.expected b/test/expected-output-test/spouse_example/compile-incremental.expected index beee5b073..ae12889c9 100644 --- a/test/expected-output-test/spouse_example/compile-incremental.expected +++ b/test/expected-output-test/spouse_example/compile-incremental.expected @@ -171,18 +171,18 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_31 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = 
R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" dependencies: [ "extraction_rule_15" , "extraction_rule_27" , "extraction_rule_24" , "extraction_rule_7" , "extraction_rule_11" ] @@ -192,10 +192,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_11 { sql: """ TRUNCATE dd_new_people_mentions; INSERT INTO dd_new_people_mentions - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM people_mentions R0 UNION ALL - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM dd_delta_people_mentions R0 """ @@ -207,10 +207,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE dd_new_articles; INSERT INTO dd_new_articles - SELECT R0.article_id, R0.text + SELECT R0.article_id, R0.text FROM articles R0 UNION ALL - SELECT R0.article_id, R0.text + SELECT R0.article_id, R0.text FROM dd_delta_articles R0 """ @@ -222,7 +222,7 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_25 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS - SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" + SELECT R0.sentence_id AS 
"dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" FROM dd_delta_sentences R0 """ @@ -234,10 +234,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_15 { sql: """ TRUNCATE dd_new_has_spouse_candidates; INSERT INTO dd_new_has_spouse_candidates - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM has_spouse_candidates R0 UNION ALL - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM dd_delta_has_spouse_candidates R0 """ @@ -249,10 +249,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_19 { sql: """ TRUNCATE dd_new_has_spouse_features; INSERT INTO dd_new_has_spouse_features - SELECT R0.relation_id, R0.feature + SELECT R0.relation_id, R0.feature FROM has_spouse_features R0 UNION ALL - SELECT R0.relation_id, R0.feature + SELECT R0.relation_id, R0.feature FROM dd_delta_has_spouse_features R0 """ @@ -264,10 +264,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_7 { sql: """ TRUNCATE dd_new_sentences; INSERT INTO dd_new_sentences - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM sentences R0 UNION ALL - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM dd_delta_sentences R0 """ @@ -279,12 +279,12 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_28 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS - SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" FROM dd_delta_people_mentions R0, people_mentions R1 - WHERE R1.sentence_id = R0.sentence_id UNION ALL - SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" + WHERE R1.sentence_id = R0.sentence_id UNION ALL + SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 - WHERE 
R1.sentence_id = R0.sentence_id + WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" dependencies: [ "extraction_rule_24" , "extraction_rule_11" ] @@ -352,10 +352,10 @@ dd_new_has_spouse.label: Boolean input_query: """ SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM dd_new_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION ALL + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION ALL SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" FROM dd_new_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(dd_new_has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } @@ -364,5 +364,6 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_10, extraction_rule_17, extraction_rule_2, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_5, extraction_rule_21, extraction_rule_22] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_28, extraction_rule_25, extraction_rule_31, extraction_rule_19, extraction_rule_3, extraction_rule_33, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_15] deepdive.pipeline.pipelines.inference: [dd_new_has_spouse_0] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_28, extraction_rule_25, extraction_rule_31, extraction_rule_19, extraction_rule_3, extraction_rule_33, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_15, dd_new_has_spouse_0] deepdive.pipeline.pipelines.cleanup: [cleanup] deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/spouse_example/compile-materialization.expected b/test/expected-output-test/spouse_example/compile-materialization.expected index 706b2bbac..224d48c47 100644 --- a/test/expected-output-test/spouse_example/compile-materialization.expected +++ b/test/expected-output-test/spouse_example/compile-materialization.expected @@ -113,7 +113,7 @@ deepdive.extraction.extractors.extraction_rule_7 { sql: """ DROP VIEW IF EXISTS ext_people_input; CREATE VIEW ext_people_input AS - SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" FROM sentences R0 """ @@ -125,9 +125,9 @@ deepdive.extraction.extractors.extraction_rule_13 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; CREATE VIEW ext_has_spouse_features_input AS - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , 
R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" dependencies: [ "extraction_rule_9" , "extraction_rule_6" ] @@ -137,9 +137,9 @@ deepdive.extraction.extractors.extraction_rule_10 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_input; CREATE VIEW ext_has_spouse_input AS - SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" + SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" FROM people_mentions R0, people_mentions R1 - WHERE R1.sentence_id = R0.sentence_id + WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" dependencies: [ "extraction_rule_6" ] @@ -180,7 +180,7 @@ input_query: """ SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } @@ -189,4 +189,5 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_3, extraction_rule_5, extraction_rule_0, extraction_rule_4, extraction_rule_2] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_15, extraction_rule_6] deepdive.pipeline.pipelines.inference: [has_spouse_0] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_12, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_15, extraction_rule_6, has_spouse_0] deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/spouse_example/compile-merge.expected b/test/expected-output-test/spouse_example/compile-merge.expected index 9a972542f..632d934f0 100644 --- a/test/expected-output-test/spouse_example/compile-merge.expected +++ b/test/expected-output-test/spouse_example/compile-merge.expected @@ -20,7 +20,7 @@ deepdive.extraction.extractors.extraction_rule_1 { sql: """ TRUNCATE sentences; INSERT INTO sentences - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM dd_new_sentences R0 """ @@ -32,7 +32,7 @@ deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE has_spouse_candidates; INSERT INTO has_spouse_candidates - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, 
R0.description, R0.relation_id, R0.is_true + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM dd_new_has_spouse_candidates R0 """ @@ -44,7 +44,7 @@ deepdive.extraction.extractors.extraction_rule_0 { sql: """ TRUNCATE articles; INSERT INTO articles - SELECT R0.article_id, R0.text + SELECT R0.article_id, R0.text FROM dd_new_articles R0 """ @@ -56,7 +56,7 @@ deepdive.extraction.extractors.extraction_rule_4 { sql: """ TRUNCATE has_spouse_features; INSERT INTO has_spouse_features - SELECT R0.relation_id, R0.feature + SELECT R0.relation_id, R0.feature FROM dd_new_has_spouse_features R0 """ @@ -68,7 +68,7 @@ deepdive.extraction.extractors.extraction_rule_2 { sql: """ TRUNCATE people_mentions; INSERT INTO people_mentions - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM dd_new_people_mentions R0 """ @@ -78,3 +78,4 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.extraction: [extraction_rule_0, extraction_rule_3, extraction_rule_4, extraction_rule_2, extraction_rule_1] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_0, extraction_rule_3, extraction_rule_4, extraction_rule_2, extraction_rule_1] diff --git a/test/expected-output-test/spouse_example/compile.expected b/test/expected-output-test/spouse_example/compile.expected index 87d478af8..25024387e 100644 --- a/test/expected-output-test/spouse_example/compile.expected +++ b/test/expected-output-test/spouse_example/compile.expected @@ -113,7 +113,7 @@ deepdive.extraction.extractors.extraction_rule_7 { sql: """ DROP VIEW IF EXISTS ext_people_input; CREATE VIEW ext_people_input AS - SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" FROM sentences R0 """ @@ -125,9 +125,9 @@ deepdive.extraction.extractors.extraction_rule_13 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; CREATE VIEW ext_has_spouse_features_input AS - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" dependencies: [ "extraction_rule_9" , "extraction_rule_6" ] @@ -137,9 +137,9 @@ deepdive.extraction.extractors.extraction_rule_10 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_input; CREATE VIEW ext_has_spouse_input AS - SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , 
R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" + SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" FROM people_mentions R0, people_mentions R1 - WHERE R1.sentence_id = R0.sentence_id + WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" dependencies: [ "extraction_rule_6" ] @@ -180,7 +180,7 @@ input_query: """ SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } @@ -189,3 +189,5 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_3, extraction_rule_5, extraction_rule_0, extraction_rule_4, extraction_rule_2] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_15, extraction_rule_6] deepdive.pipeline.pipelines.inference: [has_spouse_0] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_12, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_15, extraction_rule_6, has_spouse_0] +deepdive.pipeline.pipelines.cleanup: [cleanup] diff --git a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected index ca8c73cb5..9b0c86453 100644 --- a/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/compile-incremental.expected @@ -171,18 +171,18 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_31 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS 
"people_mentions.R3.length" + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" dependencies: [ "extraction_rule_15" , "extraction_rule_27" , "extraction_rule_24" , "extraction_rule_7" , "extraction_rule_11" ] @@ -192,10 +192,10 @@ 
dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_11 { sql: """ TRUNCATE dd_new_people_mentions; INSERT INTO dd_new_people_mentions - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM people_mentions R0 UNION ALL - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM dd_delta_people_mentions R0 """ @@ -207,10 +207,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE dd_new_articles; INSERT INTO dd_new_articles - SELECT R0.article_id, R0.text + SELECT R0.article_id, R0.text FROM articles R0 UNION ALL - SELECT R0.article_id, R0.text + SELECT R0.article_id, R0.text FROM dd_delta_articles R0 """ @@ -222,10 +222,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_25 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS - SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" FROM sentences R0 UNION ALL - SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" + SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" FROM dd_delta_sentences R0 """ @@ -237,10 +237,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_15 { sql: """ TRUNCATE dd_new_has_spouse_candidates; INSERT INTO dd_new_has_spouse_candidates - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM has_spouse_candidates R0 UNION ALL - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM dd_delta_has_spouse_candidates R0 """ @@ -252,10 +252,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_19 { sql: """ TRUNCATE dd_new_has_spouse_features; INSERT INTO dd_new_has_spouse_features - SELECT R0.relation_id, R0.feature + SELECT R0.relation_id, R0.feature FROM has_spouse_features R0 UNION ALL - SELECT R0.relation_id, R0.feature + SELECT R0.relation_id, R0.feature FROM dd_delta_has_spouse_features R0 """ @@ -267,10 +267,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_7 { sql: """ TRUNCATE dd_new_sentences; INSERT INTO dd_new_sentences - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM sentences R0 UNION ALL - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, 
R0.sentence_id FROM dd_delta_sentences R0 """ @@ -282,12 +282,12 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_28 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS - SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" FROM dd_delta_people_mentions R0, people_mentions R1 - WHERE R1.sentence_id = R0.sentence_id UNION ALL - SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" + WHERE R1.sentence_id = R0.sentence_id UNION ALL + SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 - WHERE R1.sentence_id = R0.sentence_id + WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" dependencies: [ "extraction_rule_24" , "extraction_rule_11" ] @@ -355,10 +355,10 @@ dd_new_has_spouse.label: Boolean input_query: """ SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM dd_new_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION ALL + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION ALL SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" FROM dd_new_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Linear(dd_new_has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } @@ -367,5 +367,6 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_10, extraction_rule_17, extraction_rule_2, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_5, extraction_rule_21, extraction_rule_22] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_28, extraction_rule_25, extraction_rule_31, extraction_rule_19, extraction_rule_3, extraction_rule_33, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_15] deepdive.pipeline.pipelines.inference: [dd_new_has_spouse_0] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_28, extraction_rule_25, extraction_rule_31, extraction_rule_19, extraction_rule_3, extraction_rule_33, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, 
extraction_rule_24, extraction_rule_15, dd_new_has_spouse_0] deepdive.pipeline.pipelines.cleanup: [cleanup] deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected b/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected index 3baea1486..be999877f 100644 --- a/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected +++ b/test/expected-output-test/spouse_example_new_inference/compile-incremental.expected @@ -171,18 +171,18 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_31 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_features_input; CREATE VIEW dd_delta_ext_has_spouse_features_input AS - SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + SELECT R0.words AS "dd_delta_sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_delta_sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_delta_has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_new_sentences R0, dd_delta_has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS "dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_delta_people_mentions.R2.start_position" , R2.length AS 
"dd_delta_people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_delta_people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL - SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id UNION ALL + SELECT R0.words AS "dd_new_sentences.R0.words" , R1.relation_id AS "dd_new_has_spouse_candidates.R1.relation_id" , R2.start_position AS "dd_new_people_mentions.R2.start_position" , R2.length AS "dd_new_people_mentions.R2.length" , R3.start_position AS "dd_delta_people_mentions.R3.start_position" , R3.length AS "dd_delta_people_mentions.R3.length" FROM dd_new_sentences R0, dd_new_has_spouse_candidates R1, dd_new_people_mentions R2, dd_delta_people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" dependencies: [ "extraction_rule_15" , "extraction_rule_27" , "extraction_rule_24" , "extraction_rule_7" , "extraction_rule_11" ] @@ -192,10 +192,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_11 { sql: """ TRUNCATE dd_new_people_mentions; INSERT INTO dd_new_people_mentions - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM people_mentions R0 UNION ALL - SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id + SELECT R0.sentence_id, R0.start_position, R0.length, R0.text, R0.mention_id FROM dd_delta_people_mentions R0 """ @@ -207,10 +207,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_3 { sql: """ TRUNCATE dd_new_articles; INSERT INTO dd_new_articles - SELECT R0.article_id, R0.text + SELECT R0.article_id, R0.text FROM articles R0 UNION ALL - SELECT R0.article_id, R0.text + SELECT R0.article_id, R0.text FROM dd_delta_articles R0 """ @@ -222,7 +222,7 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_25 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_people_input; CREATE VIEW dd_delta_ext_people_input AS - SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" + SELECT R0.sentence_id AS "dd_delta_sentences.R0.sentence_id" , R0.words AS "dd_delta_sentences.R0.words" , R0.ner_tags AS "dd_delta_sentences.R0.ner_tags" FROM dd_delta_sentences R0 """ @@ -234,10 +234,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_15 { sql: """ TRUNCATE 
dd_new_has_spouse_candidates; INSERT INTO dd_new_has_spouse_candidates - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM has_spouse_candidates R0 UNION ALL - SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true + SELECT R0.person1_id, R0.person2_id, R0.sentence_id, R0.description, R0.relation_id, R0.is_true FROM dd_delta_has_spouse_candidates R0 """ @@ -249,10 +249,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_19 { sql: """ TRUNCATE dd_new_has_spouse_features; INSERT INTO dd_new_has_spouse_features - SELECT R0.relation_id, R0.feature + SELECT R0.relation_id, R0.feature FROM has_spouse_features R0 UNION ALL - SELECT R0.relation_id, R0.feature + SELECT R0.relation_id, R0.feature FROM dd_delta_has_spouse_features R0 """ @@ -264,10 +264,10 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_7 { sql: """ TRUNCATE dd_new_sentences; INSERT INTO dd_new_sentences - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM sentences R0 UNION ALL - SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id + SELECT R0.document_id, R0.sentence, R0.words, R0.lemma, R0.pos_tags, R0.dependencies, R0.ner_tags, R0.sentence_offset, R0.sentence_id FROM dd_delta_sentences R0 """ @@ -279,12 +279,12 @@ dd_new_has_spouse.label: Boolean deepdive.extraction.extractors.extraction_rule_28 { sql: """ DROP VIEW IF EXISTS dd_delta_ext_has_spouse_input; CREATE VIEW dd_delta_ext_has_spouse_input AS - SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" + SELECT R0.sentence_id AS "dd_delta_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_delta_people_mentions.R0.mention_id" , R0.text AS "dd_delta_people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" FROM dd_delta_people_mentions R0, people_mentions R1 - WHERE R1.sentence_id = R0.sentence_id UNION ALL - SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" + WHERE R1.sentence_id = R0.sentence_id UNION ALL + SELECT R0.sentence_id AS "dd_new_people_mentions.R0.sentence_id" , R0.mention_id AS "dd_new_people_mentions.R0.mention_id" , R0.text AS "dd_new_people_mentions.R0.text" , R1.mention_id AS "dd_delta_people_mentions.R1.mention_id" , R1.text AS "dd_delta_people_mentions.R1.text" FROM dd_new_people_mentions R0, dd_delta_people_mentions R1 - WHERE R1.sentence_id = R0.sentence_id + WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" dependencies: [ "extraction_rule_24" , "extraction_rule_11" ] @@ -352,10 +352,10 @@ dd_new_has_spouse.label: Boolean input_query: """ SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS 
"has_spouse_features.R2.feature" FROM dd_new_has_spouse R0, dd_delta_has_spouse_candidates R1, has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION ALL + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id UNION ALL SELECT R0.id AS "dd_new_has_spouse.R0.id" , R2.feature AS "dd_delta_has_spouse_features.R2.feature" FROM dd_new_has_spouse R0, dd_new_has_spouse_candidates R1, dd_delta_has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(dd_new_has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } @@ -365,7 +365,7 @@ dd_new_has_spouse.label: Boolean input_query: """ SELECT R0.id AS "dd_new_has_spouse.R0.id" , R1.id AS "dd_new_has_spouse.R1.id" FROM dd_new_has_spouse R0, dd_new_has_spouse R1, dd_new_has_spouse_candidates R2, dd_new_has_spouse_candidates R3 - WHERE R2.relation_id = R0.relation_id AND R3.person1_id = R2.person2_id AND R3.person2_id = R2.person1_id AND R3.relation_id = R1.relation_id """ + WHERE R2.relation_id = R0.relation_id AND R3.person1_id = R2.person2_id AND R3.person2_id = R2.person1_id AND R3.relation_id = R1.relation_id """ function: "Imply(dd_new_has_spouse.R1.label, dd_new_has_spouse.R0.label)" weight: "3.0" } @@ -374,5 +374,6 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_10, extraction_rule_17, extraction_rule_2, extraction_rule_14, extraction_rule_18, extraction_rule_6, extraction_rule_9, extraction_rule_13, extraction_rule_5, extraction_rule_21, extraction_rule_22] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_28, extraction_rule_25, extraction_rule_31, extraction_rule_19, extraction_rule_3, extraction_rule_33, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_15] deepdive.pipeline.pipelines.inference: [dd_new_has_spouse_0, dd_new_has_spouse_1] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_28, extraction_rule_25, extraction_rule_31, extraction_rule_19, extraction_rule_3, extraction_rule_33, extraction_rule_30, extraction_rule_23, extraction_rule_27, extraction_rule_11, extraction_rule_24, extraction_rule_15, dd_new_has_spouse_0, dd_new_has_spouse_1] deepdive.pipeline.pipelines.cleanup: [cleanup] deepdive.pipeline.base_dir: ${BASEDIR} diff --git a/test/expected-output-test/spouse_example_new_inference/compile.expected b/test/expected-output-test/spouse_example_new_inference/compile.expected index d184ea219..8b7469360 100644 --- a/test/expected-output-test/spouse_example_new_inference/compile.expected +++ b/test/expected-output-test/spouse_example_new_inference/compile.expected @@ -113,7 +113,7 @@ deepdive.extraction.extractors.extraction_rule_7 { sql: """ DROP VIEW IF EXISTS ext_people_input; CREATE VIEW ext_people_input AS - SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" + SELECT R0.sentence_id AS "sentences.R0.sentence_id" , R0.words AS "sentences.R0.words" , R0.ner_tags AS "sentences.R0.ner_tags" FROM sentences R0 """ @@ -125,9 +125,9 @@ deepdive.extraction.extractors.extraction_rule_13 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_features_input; CREATE VIEW ext_has_spouse_features_input AS - SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS 
"has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" + SELECT R0.words AS "sentences.R0.words" , R1.relation_id AS "has_spouse_candidates.R1.relation_id" , R2.start_position AS "people_mentions.R2.start_position" , R2.length AS "people_mentions.R2.length" , R3.start_position AS "people_mentions.R3.start_position" , R3.length AS "people_mentions.R3.length" FROM sentences R0, has_spouse_candidates R1, people_mentions R2, people_mentions R3 - WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id + WHERE R1.sentence_id = R0.sentence_id AND R2.sentence_id = R0.sentence_id AND R2.mention_id = R1.person1_id AND R3.sentence_id = R0.sentence_id AND R3.mention_id = R1.person2_id """ style: "sql_extractor" dependencies: [ "extraction_rule_9" , "extraction_rule_6" ] @@ -137,9 +137,9 @@ deepdive.extraction.extractors.extraction_rule_10 { sql: """ DROP VIEW IF EXISTS ext_has_spouse_input; CREATE VIEW ext_has_spouse_input AS - SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" + SELECT R0.sentence_id AS "people_mentions.R0.sentence_id" , R0.mention_id AS "people_mentions.R0.mention_id" , R0.text AS "people_mentions.R0.text" , R1.mention_id AS "people_mentions.R1.mention_id" , R1.text AS "people_mentions.R1.text" FROM people_mentions R0, people_mentions R1 - WHERE R1.sentence_id = R0.sentence_id + WHERE R1.sentence_id = R0.sentence_id """ style: "sql_extractor" dependencies: [ "extraction_rule_6" ] @@ -180,7 +180,7 @@ input_query: """ SELECT R0.id AS "has_spouse.R0.id" , R2.feature AS "has_spouse_features.R2.feature" FROM has_spouse R0, has_spouse_candidates R1, has_spouse_features R2 - WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ + WHERE R1.relation_id = R0.relation_id AND R2.relation_id = R0.relation_id """ function: "Imply(has_spouse.R0.label)" weight: "?(has_spouse_features.R2.feature)" } @@ -190,7 +190,7 @@ input_query: """ SELECT R0.id AS "has_spouse.R0.id" , R1.id AS "has_spouse.R1.id" FROM has_spouse R0, has_spouse R1, has_spouse_candidates R2, has_spouse_candidates R3 - WHERE R2.relation_id = R0.relation_id AND R3.person1_id = R2.person2_id AND R3.person2_id = R2.person1_id AND R3.relation_id = R1.relation_id """ + WHERE R2.relation_id = R0.relation_id AND R3.person1_id = R2.person2_id AND R3.person2_id = R2.person1_id AND R3.relation_id = R1.relation_id """ function: "Imply(has_spouse.R1.label, has_spouse.R0.label)" weight: "3.0" } @@ -199,3 +199,5 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_3, extraction_rule_5, extraction_rule_0, extraction_rule_4, extraction_rule_2] deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_15, extraction_rule_6] deepdive.pipeline.pipelines.inference: [has_spouse_0, has_spouse_1] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_12, extraction_rule_10, extraction_rule_13, extraction_rule_9, extraction_rule_15, extraction_rule_6, has_spouse_0, 
has_spouse_1] +deepdive.pipeline.pipelines.cleanup: [cleanup] From 7c19a8c7637445aaef6c2e77ea0e1df9dc96d7cc Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Wed, 8 Jul 2015 14:03:04 -0700 Subject: [PATCH 154/347] Add alias for aggregation column --- .../scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala | 9 +++++++-- test/expected-output-test/expressions/compile.expected | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 38470eea4..a00195b37 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -196,9 +196,14 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } } v match { - case InlineFunction(name, args, _) => { + case InlineFunction(name, args, agg) => { val resolvedArgs = args map (resolveVarOrConst(_, OriginalOnly)) - s"${name}(${resolvedArgs.mkString(", ")})" + val resolved = s"${name}(${resolvedArgs.mkString(", ")})" + val aggAlias = if (agg) { args(0) match { + case Variable(v,r,i) => s""" AS "${name}_${resolveColumn(v, qs, cq, OriginalOnly).get}"""" // " + case _ => "" + }} else "" + resolved + aggAlias } case _ => resolveVarOrConst(v, alias) } diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected index 88d756923..090108a90 100644 --- a/test/expected-output-test/expressions/compile.expected +++ b/test/expected-output-test/expressions/compile.expected @@ -61,7 +61,7 @@ SELECT DISTINCT 'test' :: TEXT, 123, R0.k AS "a.R0.k" , unnest(R1.q) FROM a R0, b R1, c R2 WHERE R1.k = R0.k AND R2.s = R1.p || R1.q AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR (R1.r < 20 AND R1.r > 10)) UNION ALL - SELECT R1.p AS "b.R1.p" , R1.q AS "b.R1.q" , MAX(R1.r) + SELECT R1.p AS "b.R1.p" , R1.q AS "b.R1.q" , MAX(R1.r) AS "MAX_R1.r" FROM a R0, b R1 WHERE R1.k = R0.k GROUP BY R1.p, R1.q From fa12a6bd3b26972454e744360fe7f1266ca2d1cf Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Wed, 8 Jul 2015 16:28:04 -0700 Subject: [PATCH 155/347] Add bitwise AND (&) and IS / IS NOT operators --- src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index c0873cff4..b192669d2 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -120,7 +120,7 @@ class DeepDiveLogParser extends JavaTokenParsers { } } - def operator = "||" | "+" | "-" | "*" | "/" + def operator = "||" | "+" | "-" | "*" | "/" | "&" def castOp = "::" def variable = variableName ^^ { Variable(_, "", 0) } @@ -181,7 +181,7 @@ class DeepDiveLogParser extends JavaTokenParsers { def cqBody: Parser[List[Atom]] = rep1sep(cqBodyAtom, ",") // conditions - def filterOperator = "LIKE" | ">" | "<" | ">=" | "<=" | "!=" | "=" + def filterOperator = "LIKE" | ">" | "<" | ">=" | "<=" | "!=" | "=" | "IS" | "IS NOT" def condition = expression ~ filterOperator ~ expression ^^ { case (lhs ~ op ~ rhs) => { Condition(lhs, op, rhs) } } From f22bb145d2728a80539e45adc409ea7cefcf4a05 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Thu, 9 Jul 2015 11:54:59 -0700 Subject: [PATCH 156/347] Fix group by. Add more aggregation functions.
Support unknown weight that only depends on inference rule --- .../org/deepdive/ddlog/DeepDiveLogCompiler.scala | 12 +++++++----- .../scala/org/deepdive/ddlog/DeepDiveLogParser.scala | 2 +- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index a00195b37..dd5614c8a 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -291,10 +291,10 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C s"\n GROUP BY ${groupbyTerms.mkString(", ")}" } - var whereClauseStr = whereClause match { - case Nil => if (conditionStr == "") "" else s"WHERE ${conditionStr}${groupbyStr}" - case _ => s"""WHERE ${whereClause.mkString(" AND ")} ${if (conditionStr == "") "" else s" AND (${conditionStr})"}${groupbyStr}""" - } + var whereClauseStr = (whereClause match { + case Nil => if (conditionStr == "") "" else s"WHERE ${conditionStr}" + case _ => s"""WHERE ${whereClause.mkString(" AND ")} ${if (conditionStr == "") "" else s" AND (${conditionStr})"}""" + }) + groupbyStr s"""FROM ${ bodyNames } ${ whereClauseStr }""" @@ -606,7 +606,9 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { weight = stmt.weights match { case KnownFactorWeight(x) => s"${x}" case UnknownFactorWeight(w) => { - s"""?(${w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, AliasOnly)).mkString(", ")})""" + val weightVar = w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, AliasOnly)).mkString(", ") + if (weightVar == "") "?" + else s"?(${weightVar})" } } } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index b192669d2..4d09d5c7e 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -126,7 +126,7 @@ class DeepDiveLogParser extends JavaTokenParsers { def variable = variableName ^^ { Variable(_, "", 0) } def columnConstant = constant ^^ { Constant(_, "", 0) } def variableOrConstant = columnConstant | variable - val aggregationFunctions = Set("MAX", "SUM", "MIN") + val aggregationFunctions = Set("MAX", "SUM", "MIN", "ARRAY_ACCUM", "ARRAY_AGG") def inlineFunction = functionName ~ "(" ~ rep1sep(variableOrConstant, ",") ~ ")" ^^ { case (name ~ _ ~ args ~ _) => { if (aggregationFunctions contains name) InlineFunction(name, args, true) From 7423bc6d6a9e389514d3b377753665e601a19f01 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Thu, 9 Jul 2015 14:25:34 -0700 Subject: [PATCH 157/347] Fix function call rule in extraction pipeline --- src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala | 5 +++-- test/expected-output-test/chunking_example/compile.expected | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index dd5614c8a..8c3cfe542 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -107,6 +107,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C var inferenceRuleGroupByHead : Map[String, List[InferenceRule]] = new HashMap[String, List[InferenceRule]]() var functionCallRuleGroupByInput : Map[String, List[FunctionCallRule]] = new HashMap[String, List[FunctionCallRule]]() var 
functionCallRuleGroupByOutput : Map[String, List[FunctionCallRule]] = new HashMap[String, List[FunctionCallRule]]() + var functionCallList : ListBuffer[FunctionCallRule] = new ListBuffer[FunctionCallRule]() def init() = { // generate the statements. @@ -124,7 +125,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C case ExtractionRule(_,_) => () case InferenceRule(_,_,_,_) => () case fdecl : FunctionDeclaration => function_schema += {fdecl.functionName -> fdecl} - case FunctionCallRule(_,_,_) => () + case f: FunctionCallRule => functionCallList += f } groupByHead(statements) analyzeVisible(statements) @@ -329,7 +330,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // Analyze the block visibility among statements def analyzeVisible(statements: List[Statement]) = { extractionRuleGroupByHead foreach {keyVal => visible += keyVal._2(0)} - functionCallRuleGroupByInput foreach {keyVal => visible += keyVal._2(0)} + functionCallList foreach { stmt => visible += stmt } } // Analyze the dependency between statements and construct a graph. diff --git a/test/expected-output-test/chunking_example/compile.expected b/test/expected-output-test/chunking_example/compile.expected index 2afd5affe..f456ed25f 100644 --- a/test/expected-output-test/chunking_example/compile.expected +++ b/test/expected-output-test/chunking_example/compile.expected @@ -124,7 +124,7 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_0, extraction_rule_1, extraction_rule_3, extraction_rule_2] -deepdive.pipeline.pipelines.extraction: [extraction_rule_6, extraction_rule_9, extraction_rule_8, extraction_rule_5] +deepdive.pipeline.pipelines.extraction: [extraction_rule_6, extraction_rule_9, extraction_rule_5, extraction_rule_8] deepdive.pipeline.pipelines.inference: [tag_0] -deepdive.pipeline.pipelines.endtoend: [extraction_rule_6, extraction_rule_9, extraction_rule_8, extraction_rule_5, tag_0] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_6, extraction_rule_9, extraction_rule_5, extraction_rule_8, tag_0] deepdive.pipeline.pipelines.cleanup: [cleanup] From 3d47ee13b7535ee561e34d4d91eed8e766a5ad6c Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Thu, 9 Jul 2015 14:30:46 -0700 Subject: [PATCH 158/347] Add example for debugging --- test.ddl | 107 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 test.ddl diff --git a/test.ddl b/test.ddl new file mode 100644 index 000000000..1fae812df --- /dev/null +++ b/test.ddl @@ -0,0 +1,107 @@ +/*sentences(docid text, sentid text, wordidxs text[], words text[], poses text[], ners text[], lemmas text[], dep_paths text[], dep_parents text[], bounding_boxes text[]). + +sentences_serialized(docid text, sentid text, wordidxs text, words text, poses text, ners text, lemmas text, dep_paths text, dep_parents text, bounding_boxes text). + +documents(docid text, sentids text[], wordidxs text[], words text[], poses text[], ners text[], lemmas text[], dep_paths text[], dep_parents text[], bounding_boxes text[]). + +documents_serialized(docid text, sentids text, wordidxs text, words text, poses text, ners text, lemmas text, dep_paths text, dep_parents text, bounding_boxes text). + +entity_formation_candidate_local (docid text, type text, eid text, entity text, prov text). + +entity_formation_candidate (docid text, type text, eid text, entity text, prov text). 
+ +entity_taxon_candidate_local (docid text, type text, eid text, entity text, author_year text, prov text). + +entity_taxon_candidate (docid text, type text, eid text, entity text, author_year text, prov text). + +entity_location_candidate (docid text, type text, eid text, entity text, prov text). + +entity_temporal_candidate (docid text, type text, eid text, entity text, prov text). + +entity_formation? (docid text, type text, eid text, entity text, prov text). + +entity_taxon? (docid text, type text, eid text, entity text, author_year text, prov text). + +entity_location? (docid text, type text, eid text, entity text, prov text). + +entity_temporal? (docid text, type text, eid text, entity text, prov text). + +relation_candidates (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text, features text). + +relation_formation? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text). + +relation_formationtemporal? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text). + +relation_formationlocation? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text). + +relation_taxonomy? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text). + +relation_formation_global? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text). + +relation_formationtemporal_global? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text). + +relation_formationlocation_global? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text). + +relation_taxonomy_global? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text). + +ddtables (docid text, tableid text, type text, sentid text). + +interval_containments (formation text, child text, parent text). + +interval_not_that_possible(formation text, interval1 text, interval2 text). + +formation_per_doc(docid text, entity text[], type text[]). + +taxon_per_doc(docid text, entity text[], type text[]). + +document_with_formation_entities(docid text, entities text, types text, sentids text, wordidxs text, words text, poses text, ners text, lemmas text, dep_paths text, dep_parents text, bounding_boxes text). + +document_with_taxon_entities(docid text, entities text, types text, sentids text, wordidxs text, words text, poses text, ners text, lemmas text, dep_paths text, dep_parents text, bounding_boxes text). + + +// Each word in a sentence is separated with @@@@@ +sentences_serialized( + docid, + sentid, + array_to_string(wordidxs, "@@@@@"), + array_to_string(words, "@@@@@"), + array_to_string(poses, "@@@@@"), + array_to_string(ners, "@@@@@"), + array_to_string(lemmas, "@@@@@"), + array_to_string(dep_paths, "@@@@@"), + array_to_string(dep_parents, "@@@@@"), + array_to_string(bounding_boxes, "@@@@@")) * + :- sentences(docid, sentid, wordidxs, words, poses, ners, lemmas, dep_paths, dep_parents, bounding_boxes). + +// Intermediate table to generate `documents_serialized`. +documents( + docid, + ARRAY_AGG(sentid), + ARRAY_AGG(wordidxs), + ARRAY_AGG(words), + ARRAY_AGG(poses), + ARRAY_AGG(ners), + ARRAY_AGG(lemmas), + ARRAY_AGG(dep_paths), + ARRAY_AGG(dep_parents), + ARRAY_AGG(bounding_boxes)) * + :- sentences_serialized(docid, sentid, wordidxs, words, poses, ners, lemmas, dep_paths, dep_parents, bounding_boxes).
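+
+// A minimal sketch of the intended round-trip, on hypothetical data: for a
+// document whose two sentences have words [A, B] and [C], the rows of
+// sentences_serialized hold "A@@@@@B" and "C", so documents_serialized
+// below ends up holding the single string "A@@@@@B|||||C".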
+ + +// Each sentence is separated with |||||, Each word in sentence is separated with @@@@@ +documents_serialized( + docid, + array_to_string(sentids, "|||||"), + array_to_string(wordidxs, "|||||"), + array_to_string(words, "|||||"), + array_to_string(poses, "|||||"), + array_to_string(ners, "|||||"), + array_to_string(lemmas, "|||||"), + array_to_string(dep_paths, "|||||"), + array_to_string(dep_parents, "|||||"), + array_to_string(bounding_boxes, "|||||")) + :- documents(docid, sentids, wordidxs, words, poses, ners, lemmas, dep_paths, dep_parents, bounding_boxes). + +/** + * Formation that can be decided by only looking at the phrase itself. + **/ From c803ba78bfeded57445154ed0113d4c5b0afd256 Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 9 Jul 2015 17:37:21 -0700 Subject: [PATCH 159/347] Drops support for multi-line /* */ style comments due to causing StackOverflow and having no easy way to fix it. Closes #31 --- .../deepdive/ddlog/DeepDiveLogParser.scala | 6 +- test.ddl | 107 ------------------ test/expected-output-test/comments/input.ddl | 61 ++++++++-- .../nested_multiline_comments/input.ddl | 2 +- .../parse-error.expected | 6 +- 5 files changed, 61 insertions(+), 121 deletions(-) delete mode 100644 test.ddl diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index 10cc1caac..34842221c 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -53,9 +53,9 @@ class DeepDiveLogParser extends JavaTokenParsers { s.stripPrefix("\"").stripSuffix("\"")) } - // C/Java/Scala-style as well as shell script-style comments are supported - // by treating them as whiteSpace - protected override val whiteSpace = """(?:(?:^|\s+)#.*|//.*|(?m)/\*(\*(?!/)|[^*])*\*/|\s)+""".r + // Single-line comments beginning with # or // are supported by treating them as whiteSpace + // C/Java/Scala style multi-line comments cannot be easily supported with RegexParsers unless we introduce a dedicated lexer. + protected override val whiteSpace = """(?:(?:^|\s+)#.*|//.*|\s)+""".r // We just use Java identifiers to parse various names def relationName = ident diff --git a/test.ddl b/test.ddl deleted file mode 100644 index 1fae812df..000000000 --- a/test.ddl +++ /dev/null @@ -1,107 +0,0 @@ -/*sentences(docid text, sentid text, wordidxs text[], words text[], poses text[], ners text[], lemmas text[], dep_paths text[], dep_parents text[], bounding_boxes text[]). - -sentences_serialized(docid text, sentid text, wordidxs text, words text, poses text, ners text, lemmas text, dep_paths text, dep_parents text, bounding_boxes text). - -documents(docid text, sentids text[], wordidxs text[], words text[], poses text[], ners text[], lemmas text[], dep_paths text[], dep_parents text[], bounding_boxes text[]). - -documents_serialized(docid text, sentids text, wordidxs text, words text, poses text, ners text, lemmas text, dep_paths text, dep_parents text, bounding_boxes text). - -entity_formation_candidate_local (docid text, type text, eid text, entity text, prov text). - -entity_formation_candidate (docid text, type text, eid text, entity text, prov text). - -entity_taxon_candidate_local (docid text, type text, eid text, entity text, author_year text, prov text). - -entity_taxon_candidate (docid text, type text, eid text, entity text, author_year text, prov text). - -entity_location_candidate (docid text, type text, eid text, entity text, prov text). 
-
-entity_temporal_candidate (docid text, type text, eid text, entity text, prov text).
-
-entity_formation? (docid text, type text, eid text, entity text, prov text).
-
-entity_taxon? (docid text, type text, eid text, entity text, author_year text, prov text).
-
-entity_location? (docid text, type text, eid text, entity text, prov text).
-
-entity_temporal? (docid text, type text, eid text, entity text, prov text).
-
-relation_candidates (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text, features text).
-
-relation_formation? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text).
-
-relation_formationtemporal? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text).
-
-relation_formationlocation? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text).
-
-relation_taxonomy? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text).
-
-relation_formation_global? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text).
-
-relation_formationtemporal_global? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text).
-
-relation_formationlocation_global? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text).
-
-relation_taxonomy_global? (docid text, type text, eid1 text, eid2 text, entity1 text, entity2 text).
-
-ddtables (docid text, tableid text, type text, sentid text).
-
-interval_containments (formation text, child text, parent text).
-
-interval_not_that_possible(formation text, interval1 text, interval2 text).
-
-formation_per_doc(docid text, entity text[], type text[]).
-
-taxon_per_doc(docid text, entity text[], type text[]).
-
-document_with_formation_entities(docid text, entities text, types text, sentids text, wordidxs text, words text, poses text, ners text, lemmas text, dep_paths text, dep_parents text, bounding_boxes text).
-
-document_with_taxon_entities(docid text, entities text, types text, sentids text, wordidxs text, words text, poses text, ners text, lemmas text, dep_paths text, dep_parents text, bounding_boxes text).
-
-
-// Each word in sentence is separated with @@@@@
-sentences_serialized(
-    docid,
-    sentid,
-    array_to_string(wordidxs, "@@@@@"),
-    array_to_string(words, "@@@@@"),
-    array_to_string(poses, "@@@@@"),
-    array_to_string(ners, "@@@@@"),
-    array_to_string(lemmas, "@@@@@"),
-    array_to_string(dep_paths, "@@@@@"),
-    array_to_string(dep_parents, "@@@@@"),
-    array_to_string(bounding_boxes, "@@@@@")) *
-  :- sentences(docid, sentid, wordidxs, words, poses, ners, lemmas, dep_paths, dep_parents, bounding_boxes).
-
-// Intermediate table to generate `documents_serialized`.
-documents(
-    docid,
-    ARRAY_AGG(sentid),
-    ARRAY_AGG(wordidxs),
-    ARRAY_AGG(words),
-    ARRAY_AGG(poses),
-    ARRAY_AGG(ners),
-    ARRAY_AGG(lemmas),
-    ARRAY_AGG(dep_paths),
-    ARRAY_AGG(dep_parents),
-    ARRAY_AGG(bounding_boxes)) *
-  :- sentences_serialized(docid, sentid, wordidxs, words, poses, ners, lemmas, dep_paths, dep_parents, bounding_boxes).
- - -// Each sentence is separated with |||||, Each word in sentence is separated with @@@@@ -documents_serialized( - docid, - array_to_string(sentids, "|||||"), - array_to_string(wordidxs, "|||||"), - array_to_string(words, "|||||"), - array_to_string(poses, "|||||"), - array_to_string(ners, "|||||"), - array_to_string(lemmas, "|||||"), - array_to_string(dep_paths, "|||||"), - array_to_string(dep_parents, "|||||"), - array_to_string(bounding_boxes, "|||||")) - :- documents(docid, sentids, wordidxs, words, poses, ners, lemmas, dep_paths, dep_parents, bounding_boxes). - -/** - * Formation that can be decided by only looking at the phrase itself. - **/ diff --git a/test/expected-output-test/comments/input.ddl b/test/expected-output-test/comments/input.ddl index f34c68b9d..fddd68683 100644 --- a/test/expected-output-test/comments/input.ddl +++ b/test/expected-output-test/comments/input.ddl @@ -3,14 +3,61 @@ R(a int). S( a int # this is an integer field - , b text // seond one is a text field - , c /* third one is a float field */ float + , b text // second one is a text field + , c float#this_isnt_a_comment ). R(x) :- S(x, y, z). -/* -Multi --line -comments -*/ +# Long block of comments (generated from http://www.lipsum.com/feed) +# +# Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum ut +# elementum nisi, eget blandit ex. Donec dictum quam porta dolor commodo, sed +# porta ipsum pretium. Donec egestas iaculis lacinia. Pellentesque ultricies +# enim eget viverra iaculis. Praesent auctor erat id sem suscipit, quis feugiat +# nulla volutpat. Vestibulum faucibus interdum enim, sed ornare mi iaculis sit +# amet. Sed viverra pellentesque metus nec finibus. Morbi et turpis augue. +# Curabitur luctus, lacus egestas pharetra rutrum, mi velit vestibulum nulla, +# ut porta sem diam et nibh. Nulla finibus felis vel sem elementum pulvinar. +# Morbi congue ante ut elit dapibus, sed sodales diam aliquam. Cras justo +# nulla, dignissim sed justo maximus, aliquam dapibus ex. Donec feugiat +# suscipit arcu eu tempor. +# +# Maecenas lacinia lacus nec nibh tristique, ac vestibulum sem rutrum. Proin +# interdum ex risus, euismod sodales nulla interdum non. Vivamus lectus massa, +# tristique eu convallis id, hendrerit sed quam. Nulla ullamcorper dapibus sem +# id tempus. In imperdiet lacus et odio mattis cursus. Proin a sapien sit amet +# metus cursus finibus eu et elit. Sed ac vehicula nibh. Nam eleifend nunc eget +# magna malesuada, vel accumsan ligula mollis. Maecenas tristique a justo ac +# pulvinar. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices +# posuere cubilia Curae; Suspendisse laoreet a nulla at pellentesque. Sed +# aliquet sapien elit, at vulputate lorem dictum nec. Pellentesque orci ante, +# maximus id ex sed, placerat dignissim erat. Suspendisse id ullamcorper enim. +# +# Praesent cursus auctor laoreet. Nullam vehicula volutpat odio, vitae pulvinar +# elit venenatis in. Aliquam vel maximus lectus, ac commodo velit. Fusce +# consectetur blandit bibendum. Proin elementum ultricies metus sed viverra. +# Aenean vel orci eu metus mattis maximus id quis ipsum. Nullam a velit risus. +# Cras tempus vel massa ut facilisis. Cras semper, velit id euismod lacinia, +# turpis nisl tincidunt ex, eu varius sapien lorem vitae odio. Vivamus sed +# mauris nunc. Etiam et libero at metus aliquet dignissim. Quisque dui velit, +# bibendum eget imperdiet in, sagittis et lorem. Nullam blandit tempor felis et +# congue. Cras nec vehicula ante. +# +# Phasellus ac bibendum purus. 
Quisque lobortis lobortis porttitor. Fusce ut +# magna lobortis, aliquam ipsum at, venenatis diam. Nullam at egestas risus, ac +# consectetur ante. Integer commodo gravida libero eu ultricies. Mauris justo +# lectus, rutrum ac magna eget, condimentum suscipit nisi. Sed aliquam lacus +# eget ligula convallis, a accumsan turpis interdum. Pellentesque et turpis +# feugiat, elementum justo id, efficitur sem. +# +# Etiam massa orci, placerat quis dui eget, sollicitudin posuere nulla. Lorem +# ipsum dolor sit amet, consectetur adipiscing elit. Vivamus elementum euismod +# erat in rutrum. Nullam ut magna velit. In aliquet eros a venenatis efficitur. +# Ut dignissim nibh non lectus gravida, id maximus ligula luctus. Pellentesque +# vitae euismod est, at viverra felis. Donec faucibus nisl non nulla tincidunt +# dapibus. Nunc venenatis molestie vulputate. Cum sociis natoque penatibus et +# magnis dis parturient montes, nascetur ridiculus mus. Cras auctor faucibus +# enim nec venenatis. Nulla varius diam mi, egestas luctus sem gravida ac. +# Phasellus hendrerit enim augue, sagittis consequat turpis elementum ac. Nam +# sapien velit, gravida quis sem at, auctor laoreet ex. diff --git a/test/parse-error-test/nested_multiline_comments/input.ddl b/test/parse-error-test/nested_multiline_comments/input.ddl index 82e085022..ea73e476a 100644 --- a/test/parse-error-test/nested_multiline_comments/input.ddl +++ b/test/parse-error-test/nested_multiline_comments/input.ddl @@ -1 +1 @@ -/* Multi-line comments /* unfortunately cannot be nested */ */ +/* Multi-line comments are not supported */ diff --git a/test/parse-error-test/nested_multiline_comments/parse-error.expected b/test/parse-error-test/nested_multiline_comments/parse-error.expected index 366b77ddc..cf167b1ef 100644 --- a/test/parse-error-test/nested_multiline_comments/parse-error.expected +++ b/test/parse-error-test/nested_multiline_comments/parse-error.expected @@ -1,4 +1,4 @@ -[error] parse-error-test/nested_multiline_comments/input.ddl[1.61] failure: string matching regex `\p{javaJavaIdentifierStart}\p{javaJavaIdentifierPart}*' expected but `*' found +[error] parse-error-test/nested_multiline_comments/input.ddl[1.1] failure: string matching regex `\p{javaJavaIdentifierStart}\p{javaJavaIdentifierPart}*' expected but `/' found -/* Multi-line comments /* unfortunately cannot be nested */ */ - ^ +/* Multi-line comments are not supported */ +^ From d9b17cd17c76f4afab76b9871b48b5d9648eb22b Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 9 Jul 2015 17:59:08 -0700 Subject: [PATCH 160/347] Switches Travis settings to use container-based infra --- .travis.yml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index e2b825e41..9d3dd9d73 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,16 @@ language: scala scala: - "2.11.1" -before_install: - - sudo apt-get install -qq wdiff + +sudo: false # to use container-based infrastructure +addons: + apt: + packages: + - wdiff +cache: + directories: + - $HOME/.ivy2/cache + script: - make test MEASURE_COVERAGE=true after_success: From aa9300084848d3a7a914f0ea64f95d5e7ba4f9de Mon Sep 17 00:00:00 2001 From: Jaeho Shin Date: Thu, 9 Jul 2015 19:36:56 -0700 Subject: [PATCH 161/347] Rewrites Makefile for running individual tests with ONLY= and EXCEPT= make arguments. 
Rewrites Makefile and test scripts, refactoring SBT targets to scala.mk and BATS part to test/bats.mk --- .travis.yml | 4 +- Makefile | 83 +++++++++---------- scala.mk | 69 +++++++++++++++ test/bats-template.bash | 6 +- test/bats.mk | 37 +++++++++ test/{test.sh => enumerate-tests.sh} | 16 ++-- test/expected-output-test.bats.template | 2 +- test/parse-error-test.bats.template | 2 +- .../parse-error.expected | 2 +- 9 files changed, 159 insertions(+), 62 deletions(-) create mode 100644 scala.mk create mode 100644 test/bats.mk rename test/{test.sh => enumerate-tests.sh} (62%) diff --git a/.travis.yml b/.travis.yml index 9d3dd9d73..25f64fbe8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,7 +11,9 @@ cache: directories: - $HOME/.ivy2/cache +install: + - make test-build script: - - make test MEASURE_COVERAGE=true + - make test after_success: - sbt coverageReport coveralls diff --git a/Makefile b/Makefile index efb9b5f84..a2aa295f5 100644 --- a/Makefile +++ b/Makefile @@ -1,59 +1,50 @@ -# Makefile for DeepDiveLogCompiler +# Makefile for DDlog compiler -SOURCE_DIR = src/main/scala/org/deepdive/ddlog -TARGET_DIR = target/scala-2.10/classes -COVERAGE_DIR = $(TARGET_DIR)/../scoverage-data -TEST_CLASSPATH_CACHE = $(TARGET_DIR)/../dependency-classpath + +### Build & Clean ############################################################# + +# build a standalone jar JAR = ddlog.jar +.PHONY: package +package: $(JAR) +$(JAR): scala-assembly-jar + ln -sfn $(SCALA_ASSEMBLY_JAR) $@ + touch $@ + +.PHONY: clean +clean: scala-clean + # clean test artifacts + rm -f $(JAR) $(wildcard test/*/*/*.actual) + find test/ -name '*.bats' -type l -exec rm -f {} + + +include scala.mk # defines scala-build, scala-test-build, scala-assembly-jar, scala-clean targets + -# test -.PHONY: test -test: $(TARGET_DIR) $(TEST_CLASSPATH_CACHE) - CLASSPATH=$(realpath $<):$(shell cat $(TEST_CLASSPATH_CACHE)) \ -TEST_CLASS_OR_JAR=org.deepdive.ddlog.DeepDiveLog \ -test/test.sh -$(TEST_CLASSPATH_CACHE): build.sbt $(wildcard project/*.sbt) - sbt "export compile:dependency-classpath" | tail -1 >$@ - -SOURCES = $(wildcard $(SOURCE_DIR)/*.scala) -ifndef MEASURE_COVERAGE -$(TARGET_DIR): $(SOURCES) - sbt compile -else -$(TARGET_DIR): $(COVERAGE_DIR) -$(COVERAGE_DIR): $(SOURCES) - # enabling coverage measurement - sbt coverage compile -endif + +### Test ###################################################################### + +# test prep for built classes or assembly jar +test-package: $(JAR) + $(MAKE) test TEST_JAR=1 +ifndef TEST_JAR +test-build: scala-test-build +test: export CLASSPATH = $(shell cat $(SCALA_TEST_CLASSPATH_EXPORTED)) + +else # ifdef TEST_JAR +.PHONY: test-package +test-build: $(JAR) +test: export CLASSPATH = $(realpath $(JAR)) + +endif # TEST_JAR # test coverage report .PHONY: test-coverage coveralls test-coverage: - -$(MAKE) test MEASURE_COVERAGE=true + -$(MAKE) test sbt coverageReport coveralls: test-coverage # submit coverage data to https://coveralls.io/r/HazyResearch/ddlog # (Make sure you have set COVERALLS_REPO_TOKEN=...) 
sbt coveralls
-# test standalone package
-.PHONY: test-package
-test-package: $(JAR)
-	CLASSPATH= \
-TEST_CLASS_OR_JAR="-jar $(realpath $(JAR))" \
-test/test.sh
-
-# build standalone jar
-.PHONY: package
-package: $(JAR)
-$(JAR): $(wildcard $(SOURCE_DIR)/*.scala)
-	sbt clean assembly
-	ln -sfn $$(ls -t target/scala-*/*-assembly-*.jar | head -1) $@
-	touch $@
-
-.PHONY: clean
-clean:
-	sbt clean
-	# clean test artifacts
-	rm -f $(JAR) $(TEST_CLASSPATH_CACHE) $(wildcard test/*/*/*.actual)
-	find test/ -name '*.bats' -type l -exec rm -f {} +
+include test/bats.mk  # defines test, test-build, test-list targets
diff --git a/scala.mk b/scala.mk
new file mode 100644
index 000000000..f9737debd
--- /dev/null
+++ b/scala.mk
@@ -0,0 +1,69 @@
+# Makefile for Scala ##########################################################
+
+# Why do we use Makefile instead of SBT?
+# - Makefile can build components written in any language.  We use it anyway
+#   for handling dependency and installation.
+# - It's much easier to run integration tests with non-Scala components outside
+#   SBT or ScalaTest.
+# - SBT is slow.
+#
+# How can Makefile build and test Scala code?
+# - By exporting the CLASSPATH and running java directly instead, we can avoid
+#   SBT's poor command-line interface performance.  Note that we try to invoke
+#   SBT as little as possible throughout the Makefile, duplicating some commands.
+# - Since there are test-specific classes and dependencies, there have to be two
+#   CLASSPATHs.
+# - When built for tests, the classes are instrumented such that coverage can
+#   be measured.
+
+# Some path names for Scala
+SCALA_BUILD_FILES = build.sbt $(wildcard project/*.*)
+SCALA_MAIN_SOURCES = $(shell find src/main/scala -name '*.scala' 2>/dev/null)
+SCALA_MAIN_CLASSES_DIR = target/scala-2.10/classes
+SCALA_MAIN_CLASSPATH_EXPORTED = target/scala-2.10/classpath
+SCALA_TEST_SOURCES = $(shell find src/test/scala -name '*.scala' 2>/dev/null)
+SCALA_TEST_CLASSES_DIR = target/scala-2.10/test-classes
+SCALA_TEST_CLASSPATH_EXPORTED = target/scala-2.10/test-classpath
+SCALA_COVERAGE_DIR = target/scala-2.10/scoverage-data
+SCALA_ASSEMBLY_JAR = target/scala-2.10/*-assembly-*.jar
+
+# SBT settings
+PATH := $(PATH):$(shell pwd)/sbt
+SBT_OPTS ?= -Xmx4g -XX:MaxHeapSize=4g -XX:MaxPermSize=4g
+export SBT_OPTS
+
+.PHONY: scala-build
+scala-build: $(SCALA_MAIN_CLASSES_DIR) $(SCALA_MAIN_CLASSPATH_EXPORTED)
+# How to build main Scala code and export main CLASSPATH
+$(SCALA_MAIN_CLASSES_DIR): $(SCALA_MAIN_SOURCES) $(SCALA_BUILD_FILES)
+	# Compiling Scala code
+	sbt compile
+	touch $(SCALA_MAIN_CLASSES_DIR)
+$(SCALA_MAIN_CLASSPATH_EXPORTED): $(SCALA_BUILD_FILES)
+	# Exporting CLASSPATH
+	sbt --error "export compile:full-classpath" | tee /dev/stderr | \
+	tail -1 >$@
+
+# How to build test Scala code with coverage and export test CLASSPATH
+.PHONY: scala-test-build
+scala-test-build: $(SCALA_COVERAGE_DIR) $(SCALA_TEST_CLASSES_DIR) $(SCALA_TEST_CLASSPATH_EXPORTED)
+$(SCALA_COVERAGE_DIR) $(SCALA_TEST_CLASSES_DIR): $(SCALA_MAIN_SOURCES) $(SCALA_TEST_SOURCES) $(SCALA_BUILD_FILES)
+	# Compiling Scala code for test with coverage
+	sbt coverage compile test:compile
+	touch $(SCALA_COVERAGE_DIR) $(SCALA_TEST_CLASSES_DIR)
+$(SCALA_TEST_CLASSPATH_EXPORTED): $(SCALA_BUILD_FILES)
+	# Exporting CLASSPATH for tests
+	sbt --error coverage "export test:full-classpath" | tee /dev/stderr | \
+	tail -1 | tee $(SCALA_MAIN_CLASSPATH_EXPORTED) >$@
+
+# How to build an assembly jar
+.PHONY: scala-assembly-jar
+scala-assembly-jar: $(SCALA_ASSEMBLY_JAR)
+$(SCALA_ASSEMBLY_JAR): $(SCALA_MAIN_SOURCES) $(SCALA_BUILD_FILES) + sbt clean assembly + +# How to clean +.PHONY: scala-clean +scala-clean: + sbt clean + rm -f $(SCALA_MAIN_CLASSPATH_EXPORTED) $(SCALA_TEST_CLASSPATH_EXPORTED) $(SCALA_ASSEMBLY_JAR) diff --git a/test/bats-template.bash b/test/bats-template.bash index 319ed5004..698afa401 100644 --- a/test/bats-template.bash +++ b/test/bats-template.bash @@ -1,9 +1,6 @@ #!/usr/bin/env bash # Utilities for testing with Bats -# required variables -: ${TEST_CLASS_OR_JAR:?class name or -jar with the path to the ddlog.jar to test} - # some shorthands TESTDIR=${BATS_TEST_FILENAME%.bats} TESTDIR=${TESTDIR#$PWD/} @@ -12,8 +9,9 @@ it="${TEST%.bats}:" # how to invoke ddlog compiler ddlog() { - java $TEST_CLASS_OR_JAR "$@" + java org.deepdive.ddlog.DeepDiveLog "$@" } + # how to diff (prefer wdiff) if type wdiff &>/dev/null; then if ${TRAVIS:-false}; then diff --git a/test/bats.mk b/test/bats.mk new file mode 100644 index 000000000..36f2bb906 --- /dev/null +++ b/test/bats.mk @@ -0,0 +1,37 @@ +# A Makefile fragment for running tests with BATS +# +# Author: Jaeho Shin +# Created: 2015-07-09 + +# some default locations +TEST_ROOT = test +TEST_LIST_COMMAND = $(TEST_ROOT)/enumerate-tests.sh +BATS_ROOT = $(TEST_ROOT)/bats +export TEST_ROOT + +.PHONY: test test-build test-list +# One can fine-tune the list of tests by setting environment variables +# TEST_ONLY and TEST_EXCEPT, or passing the same glob patterns as Make +# arguments ONLY and EXCEPT, e.g.: +# $ make test ONLY+=$(TEST_ROOT)/foo/*.bats EXCEPT+=$(TEST_ROOT)/*/unit_tests.bats +# $ TEST_ONLY=$(TEST_ROOT)/foo/*.bats make test EXCEPT+=$(TEST_ROOT)/*/unit_tests.bats +# $ TEST_ONLY=$(TEST_ROOT)/foo/*.bats TEST_EXCEPT=$(TEST_ROOT)/*/unit_tests.bats make test +TEST_ONLY ?= $(shell $(TEST_LIST_COMMAND)) +TEST_EXCEPT ?= +test: ONLY = $(TEST_ONLY) +test: EXCEPT = $(TEST_EXCEPT) +test: BATS_FILES = $(filter-out $(wildcard $(EXCEPT)),$(wildcard $(ONLY))) +test: $(BATS_ROOT)/bin/bats $(BATS_FILES) test-build + # Running $(shell $(TEST_ROOT)/bats/bin/bats -c $(BATS_FILES)) tests defined in $(words $(BATS_FILES)) .bats files + # To test selectively, run: make test ONLY+=/path/to/bats/files + # To exclude certain tests: make test EXCEPT+=/path/to/bats/files + # For a list of tests, run: make test-list + $(BATS_ROOT)/bin/bats $(BATS_FILES) +test-build: +$(BATS_ROOT)/bin/bats: + git submodule update --init $(BATS_ROOT) +test-list: + @echo "make test \\" + @$(TEST_LIST_COMMAND) | sed 's/$$/ \\/; s/^/ ONLY+=/p; s/^ ONLY+=/ EXCEPT+=/' + @echo " #" + diff --git a/test/test.sh b/test/enumerate-tests.sh similarity index 62% rename from test/test.sh rename to test/enumerate-tests.sh index cf48bf7dc..7baf9b6c8 100755 --- a/test/test.sh +++ b/test/enumerate-tests.sh @@ -1,10 +1,10 @@ #!/usr/bin/env bash +# A script for enumerating all .bats files after instantiating any templates set -eu -shopt -s nullglob -cd "$(dirname "$0")" -# make sure bats is available -PATH="$PWD/bats/bin:$PATH" +: ${TEST_ROOT:=test} + +( cd "$(dirname "$0")" # instantiate bats tests templates under its directory for t in *.bats.template; do @@ -14,11 +14,11 @@ for t in *.bats.template; do # create a .bats symlink for each test specification for testSpec in "$testSpecDir"/*; do [[ -d "$testSpec" ]] || continue - testSpec=${testSpec%/input.ddl} batsFile="$testSpec".bats - ln -sfn ../"$t" "$batsFile" + ln -sfn ../"$(basename "$t")" "$batsFile" + echo "$TEST_ROOT/$batsFile" done done +) -# run all .bats tests -bats "$@" *.bats */*.bats +ls 
"$TEST_ROOT"/*.bats 2>/dev/null diff --git a/test/expected-output-test.bats.template b/test/expected-output-test.bats.template index d37846df3..2f83395c8 100644 --- a/test/expected-output-test.bats.template +++ b/test/expected-output-test.bats.template @@ -4,7 +4,7 @@ # These test cases compares outputs of various modes of ddlog against a .ddl example file with its expected output. # Tests are skipped if no expected output is found. -source bats-template.bash # for $TESTDIR, $it, etc. +source "$BATS_TEST_DIRNAME"/../bats-template.bash # for $TESTDIR, $it, etc. # some preconditions setup() { diff --git a/test/parse-error-test.bats.template b/test/parse-error-test.bats.template index b15cc80d5..35b9020b8 100644 --- a/test/parse-error-test.bats.template +++ b/test/parse-error-test.bats.template @@ -3,7 +3,7 @@ # # The test case here feeds a malformed .ddl into ddlog's print command and compares whether it produces an expected error. -source bats-template.bash # for $TESTDIR, $it, etc. +source "$BATS_TEST_DIRNAME"/../bats-template.bash # for $TESTDIR, $it, etc. # some preconditions setup() { diff --git a/test/parse-error-test/nested_multiline_comments/parse-error.expected b/test/parse-error-test/nested_multiline_comments/parse-error.expected index cf167b1ef..39ddabdaf 100644 --- a/test/parse-error-test/nested_multiline_comments/parse-error.expected +++ b/test/parse-error-test/nested_multiline_comments/parse-error.expected @@ -1,4 +1,4 @@ -[error] parse-error-test/nested_multiline_comments/input.ddl[1.1] failure: string matching regex `\p{javaJavaIdentifierStart}\p{javaJavaIdentifierPart}*' expected but `/' found +[error] test/parse-error-test/nested_multiline_comments/input.ddl[1.1] failure: string matching regex `\p{javaJavaIdentifierStart}\p{javaJavaIdentifierPart}*' expected but `/' found /* Multi-line comments are not supported */ ^ From b10019c21fcfc99ee0542f69c20abd6f34d8c268 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Thu, 9 Jul 2015 20:43:52 -0700 Subject: [PATCH 162/347] Refactor expressions and conditions Allow recursive and nested expressions/conditions --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 157 ++++++++---------- .../ddlog/DeepDiveLogDeltaDeriver.scala | 27 +-- .../ddlog/DeepDiveLogMergeDeriver.scala | 6 +- .../deepdive/ddlog/DeepDiveLogParser.scala | 142 ++++++++-------- .../ddlog/DeepDiveLogPrettyPrinter.scala | 55 +++--- .../expressions/compile.expected | 24 ++- .../expressions/input.ddl | 4 +- .../expressions/print.expected | 4 +- 8 files changed, 201 insertions(+), 218 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 8c3cfe542..cdb9521ac 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -176,7 +176,8 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } // Resolve a column name with alias - def resolveColumn(s: String, qs: QuerySchema, q : ConjunctiveQuery, alias: AliasStyle) : Option[String] = { + def resolveColumn(s: String, q : ConjunctiveQuery, alias: AliasStyle) : Option[String] = { + val qs = new QuerySchema(q) val index = qs.getBodyIndex(s) val name = resolveName(qs.getVar(s)) val relation = q.bodies(0)(index).name @@ -187,36 +188,44 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } } - def resolveColumnVar(v: ColumnVariable, cq: ConjunctiveQuery, alias: AliasStyle = OriginalOnly) = { - val qs 
= new QuerySchema(cq)
-    def resolveVarOrConst(x: ColumnVariable, alias: AliasStyle) = {
-      x match {
-        case Variable(v,r,i) => resolveColumn(v, qs, cq, alias).get
-        case Constant(v,r,i) => v
-        case _ => ""
-      }
-    }
-    v match {
-      case InlineFunction(name, args, agg) => {
-        val resolvedArgs = args map (resolveVarOrConst(_, OriginalOnly))
-        val resolved = s"${name}(${resolvedArgs.mkString(", ")})"
-        val aggAlias = if (agg) { args(0) match {
-          case Variable(v,r,i) => s""" AS "${name}_${resolveColumn(v, qs, cq, OriginalOnly).get}"""" // "
-          case _ => ""
-        }} else ""
-        resolved + aggAlias
+  // resolve an expression
+  def resolveExpr(e: Expr, cq: ConjunctiveQuery, alias: AliasStyle, index : Int, isHead: Boolean) : String = {
+    def recurse(e: Expr, alias: AliasStyle, depth: Int) : String = {
+      // for columns without a name (constant, function call, binary operator), add a column index alias if necessary
+      val columnAlias = if (depth == 0 && isHead) s" AS column_${index}" else ""
+      e match {
+        case VarExpr(name) => resolveColumn(name, cq, alias).get
+        case ConstExpr(value) => value + columnAlias
+        case FuncExpr(function, args, agg) => {
+          val resolvedArgs = args map (x => recurse(x, OriginalOnly, depth + 1))
+          val resolved = s"${function}(${resolvedArgs.mkString(", ")})"
+          resolved + columnAlias
+        }
+        case BinaryOpExpr(lhs, op, rhs) => {
+          val resolvedLhs = recurse(lhs, OriginalOnly, depth + 1)
+          val resolvedRhs = recurse(rhs, OriginalOnly, depth + 1)
+          s"${resolvedLhs} ${op} ${resolvedRhs}${columnAlias}"
+        }
       }
     }
-      }
-      case _ => resolveVarOrConst(v, alias)
     }
+    }
+    recurse(e, alias, 0)
   }

-  def resolveExpr(e: Expression, cq: ConjunctiveQuery, alias: AliasStyle) = {
-    val resolvedVars = e.ops isEmpty match {
-      case true => e.variables map (resolveColumnVar(_, cq, alias))
-      case false => e.variables map (resolveColumnVar(_, cq, OriginalOnly))
+  // resolve a condition
+  def resolveCond(cond: Cond, cq: ConjunctiveQuery) : String = {
+    cond match {
+      case ComparisonCond(lhs, op, rhs) =>
+        s"${resolveExpr(lhs, cq, OriginalOnly, 0, false)} ${op} ${resolveExpr(rhs, cq, OriginalOnly, 0, false)}"
+      case NegationCond(c) => s"NOT (${resolveCond(c, cq)})"
+      case BinaryOpCond(lhs, op, rhs) => {
+        val resolvedLhs = s"${resolveCond(lhs, cq)}"
+        val resolvedRhs = s"${resolveCond(rhs, cq)}"
+        op match {
+          case LogicOperator.AND => s"(${resolvedLhs}) AND (${resolvedRhs})"
+          case LogicOperator.OR => s"(${resolvedLhs}) OR (${resolvedRhs})"
+        }
+      }
     }
-    val rest = ((e.ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString(" ")
-    resolvedVars(0) + (if (rest != "") " " + rest else "")
   }

   // This is generic code that generates the FROM with positional aliasing R0, R1, etc.
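The hunk above is the heart of this refactor: resolveExpr replaces the old flat Expression(variables, ops) handling with a recursion over the new AST, resolving variables through the query schema and attaching a positional column_<i> alias to any head column that has no natural name (a constant, function call, or binary operator). A self-contained sketch of that recursion, with a hypothetical renderColumn helper standing in for resolveColumn's schema lookup (a sketch, not the committed code):

    object ExprSketch {
      sealed trait Expr
      case class VarExpr(name: String) extends Expr
      case class ConstExpr(value: String) extends Expr
      case class FuncExpr(function: String, args: List[Expr]) extends Expr
      case class BinaryOpExpr(lhs: Expr, op: String, rhs: Expr) extends Expr

      // bottom-up rendering: leaves resolve to column references or constants,
      // inner nodes concatenate the rendered pieces
      def toSql(e: Expr, renderColumn: String => String): String = e match {
        case VarExpr(v)             => renderColumn(v)
        case ConstExpr(c)           => c
        case FuncExpr(f, args)      => s"${f}(${args.map(toSql(_, renderColumn)).mkString(", ")})"
        case BinaryOpExpr(l, op, r) => s"${toSql(l, renderColumn)} ${op} ${toSql(r, renderColumn)}"
      }

      def main(args: Array[String]): Unit =
        // prints "R1.p || R1.q" -- the same shape as the join condition
        // R2.s = R1.p || R1.q in the expressions test below
        println(toSql(BinaryOpExpr(VarExpr("p"), "||", VarExpr("q")), c => s"R1.${c}"))
    }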
@@ -229,60 +238,40 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C var whereClause = z.bodies(0).zipWithIndex flatMap { case (Atom(relName, terms),bodyIndex) => { - terms flatMap { case Expression(vars, ops, relName, index) => - // simple variable - val simpleVar = (ops isEmpty) && (vars(0) match { - case x: Variable => true - case _ => false - }) - if (simpleVar) { - vars(0) match { - case Variable(varName,relName,index) => { - val canonical_body_index = qs.getBodyIndex(varName) - if (canonical_body_index != bodyIndex) { - val real_attr_name1 = resolveName( Variable(varName, relName, index) ) - val real_attr_name2 = resolveName( qs.getVar(varName)) - Some(s"R${ bodyIndex }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") - } else { None } - } - case _ => None + terms flatMap { case ColumnExpr(expr, relName, index) => + expr match { + // a simple variable + case VarExpr(varName) => { + val canonical_body_index = qs.getBodyIndex(varName) + if (canonical_body_index != bodyIndex) { + val real_attr_name1 = resolveName( Variable(varName, relName, index) ) + val real_attr_name2 = resolveName( qs.getVar(varName)) + Some(s"R${ bodyIndex }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") + } else { None } + } + // other expressions + case _ => { + val resolved = resolveExpr(expr, z, OriginalOnly, index, false) + val attr = schema(relName, index) + Some(s"R${bodyIndex}.${attr} = ${resolved}") } - } else { // expression - val expr = resolveExpr(Expression(vars, ops, relName, index), z, OriginalOnly) - val attr = schema(relName, index) - Some(s"R${bodyIndex}.${attr} = ${expr}") } } } } // resolve conditions - val conditionList = z.conditions(0) match { - case Some(c) => c.conditions map { case x: List[Condition] => - val inner = x map { case Condition(lhs, op, rhs) => - val lhsExpr = DeepDiveLogPrettyPrinter.printExpr(lhs, resolveColumnVar(_,z)) - val rhsExpr = DeepDiveLogPrettyPrinter.printExpr(rhs, resolveColumnVar(_,z)) - s"${lhsExpr} ${op} ${rhsExpr}" - } - inner.mkString(" AND ") - } - case None => List("") + val conditionStr = z.conditions(0) match { + case Some(c) => resolveCond(c, z) + case None => "" } - val conditions = conditionList flatMap ( v => if (v != "") Some(s"(${v})") else None) - val conditionStr = conditions.mkString(" OR ") - // handle group by // map head terms, leaving out aggregation functions - val groupbyTerms = z.head.terms flatMap { case Expression(vars, ops, relName, index) => - if (ops isEmpty) { - vars(0) match { - case InlineFunction(name, args, a) => if (a) None else Some("") - case Variable(v,r,i) => resolveColumn(v, qs, z, OriginalOnly) - case _ => Some("") - } - } else { - Some("") + val groupbyTerms = z.head.terms map { case ColumnExpr(expr, relName, index) => + expr match { + case FuncExpr(f, args, agg) => if (agg) None else Some("") + case _ => Some(resolveExpr(expr, z, OriginalOnly, index, false)) } } @@ -371,14 +360,12 @@ class QuerySchema(q : ConjunctiveQuery) { def generateCanonicalVar() = { q.bodies(0).zipWithIndex.foreach { case (Atom(relName,terms),index) => { - terms.foreach { case Expression(c, op, r, i) => - if (op.isEmpty) { - c(0) match { - case Variable(v,r,i) => - if (! (query_schema contains v) ) - query_schema += { v -> (index, Variable(v,r,i) ) } - case _ => - } + terms.foreach { case ColumnExpr(expr, r, i) => + expr match { + case VarExpr(v) => + if (! 
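With resolveCond in place, generateSQLBody can render arbitrarily nested conditions: a ComparisonCond renders its two operands with resolveExpr, NegationCond wraps its body in NOT (...), and BinaryOpCond parenthesizes both sides before joining them with AND or OR, so nesting never changes meaning. As a worked example, grounded in the expressions test updated later in this patch, the body condition [z>100; !z < 20, z < 50] parses into the tree sketched below and renders as (R1.r > 100) OR ((NOT (R1.r < 20)) AND (R1.r < 50)):

    // the Cond tree the new grammar builds for "[z>100; !z < 20, z < 50]"
    // (a sketch; the parser keeps constants as strings)
    BinaryOpCond(
      ComparisonCond(VarExpr("z"), ">", ConstExpr("100")),
      LogicOperator.OR,
      BinaryOpCond(
        NegationCond(ComparisonCond(VarExpr("z"), "<", ConstExpr("20"))),
        LogicOperator.AND,
        ComparisonCond(VarExpr("z"), "<", ConstExpr("50"))))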
(query_schema contains v) ) + query_schema += { v -> (index, Variable(v,r,i) ) } + case _ => } } } @@ -453,8 +440,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { if (stmt.supervision != null) { if (stmt.q.bodies.length > 1) ss.error(s"Scoping rule does not allow disjunction.\n") - val headTerms = tmpCq.head.terms map { x => - DeepDiveLogPrettyPrinter.printExpr(x, ss.resolveColumnVar(_, tmpCq, OriginalOnly)) + val headTerms = tmpCq.head.terms map { case ColumnExpr(expr, _, index) => + ss.resolveExpr(expr, tmpCq, OriginalOnly, index, true) } val index = qs.getBodyIndex(stmt.supervision) val name = ss.resolveName(qs.getVar(stmt.supervision)) @@ -464,8 +451,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { ${ ss.generateSQLBody(tmpCq) } """ } else if ((ss.schemaDeclarationGroupByHead contains stmt.q.head.name) && (ss.schemaDeclarationGroupByHead(stmt.q.head.name)(0).isQuery) && (stmt.q.head.name startsWith "dd_new_")) { - val headTerms = tmpCq.head.terms map { x => - DeepDiveLogPrettyPrinter.printExpr(x, ss.resolveColumnVar(_, tmpCq, OriginalOnly)) + val headTerms = tmpCq.head.terms map { case ColumnExpr(expr, _, index) => + ss.resolveExpr(expr, tmpCq, OriginalOnly, index, true) } val headTermsStr = ( headTerms :+ "id" ).mkString(", ") inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, label @@ -482,8 +469,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { case true => OriginalOnly case false => OriginalAndAlias } - val variableCols = tmpCq.head.terms map { x => - ss.resolveExpr(x, tmpCq, resolveColumnFlag) + val variableCols = tmpCq.head.terms map { case ColumnExpr(expr, _, index) => + ss.resolveExpr(expr, tmpCq, resolveColumnFlag, index, true) } val selectStr = variableCols.mkString(", ") val distinctStr = if (tmpCq.isDistinct) "DISTINCT" else "" @@ -579,7 +566,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { // weight string val uwStr = stmt.weights match { case KnownFactorWeight(x) => None - case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, OriginalAndAlias)).mkString(", ")) + case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, fakeCQ, OriginalAndAlias)).mkString(", ")) } val selectStr = (List(variableIdsStr, uwStr) flatten).mkString(", ") @@ -607,7 +594,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { weight = stmt.weights match { case KnownFactorWeight(x) => s"${x}" case UnknownFactorWeight(w) => { - val weightVar = w.flatMap(s => ss.resolveColumn(s, qs2, fakeCQ, AliasOnly)).mkString(", ") + val weightVar = w.flatMap(s => ss.resolveColumn(s, fakeCQ, AliasOnly)).mkString(", ") if (weightVar == "") "?" 
else s"?(${weightVar})" } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index 1aa958c7e..d250fcbc2 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -19,45 +19,36 @@ object DeepDiveLogDeltaDeriver{ } def transform(cq: ConjunctiveQuery, isInference: Boolean, mode: String): ConjunctiveQuery = { - // transform an expression - def transformExpression(expr: Expression, prefix: String) = { - val newVars = expr.variables map { - case term: Variable => term.copy(relName = prefix + term.relName) - case term: Constant => term - case term: InlineFunction => term - } - Expression(newVars, expr.ops, expr.relName, expr.index) - } // New head val incCqHead = if (isInference) { cq.head.copy( name = newPrefix + cq.head.name, - terms = cq.head.terms map (transformExpression(_, newPrefix)) + terms = cq.head.terms ) } else { cq.head.copy( name = deltaPrefix + cq.head.name, - terms = cq.head.terms map (transformExpression(_, deltaPrefix)) + terms = cq.head.terms ) } var incCqBodies = new ListBuffer[List[Atom]]() - var incCqConditions = new ListBuffer[Option[CompoundCondition]]() + var incCqConditions = new ListBuffer[Option[Cond]]() // New incremental bodies cq.bodies zip cq.conditions foreach { case (body, cond) => // Delta body val incDeltaBody = body map { a => a.copy( name = deltaPrefix + a.name, - terms = a.terms map (transformExpression(_, deltaPrefix)) + terms = a.terms ) } // New body val incNewBody = body map { a => a.copy( name = newPrefix + a.name, - terms = a.terms map (transformExpression(_, newPrefix)) + terms = a.terms ) } var i = 0 @@ -114,10 +105,10 @@ object DeepDiveLogDeltaDeriver{ incrementalStatement += incNewStmt // from schema declaration to expressions - def variableToExpr(v: Variable) = Expression(List(v), List(), v.relName, v.index) - val originalExpr = stmt.a.terms map (variableToExpr(_)) - val incDeltaExpr = incDeltaStmt.a.terms map (variableToExpr(_)) - val incNewExpr = incNewStmt.a.terms map (variableToExpr(_)) + def variableToExpr(v: Variable) = ColumnExpr(VarExpr(v.varName), v.relName, v.index) + val originalExpr = stmt.a.terms map variableToExpr + val incDeltaExpr = incDeltaStmt.a.terms map variableToExpr + val incNewExpr = incNewStmt.a.terms map variableToExpr // if (!stmt.isQuery) { incrementalStatement += ExtractionRule(ConjunctiveQuery( diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala index 6ab4d6666..cbc73c0c3 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala @@ -19,9 +19,9 @@ object DeepDiveLogMergeDeriver{ ) ) - def variableToExpr(v: Variable) = Expression(List(v), List(), v.relName, v.index) - val originalExpr = stmt.a.terms map (variableToExpr(_)) - val incNewExpr = incNewStmt.a.terms map (variableToExpr(_)) + def variableToExpr(v: Variable) = ColumnExpr(VarExpr(v.varName), v.relName, v.index) + val originalExpr = stmt.a.terms map variableToExpr + val incNewExpr = incNewStmt.a.terms map variableToExpr ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, originalExpr), List(List(Atom(incNewStmt.a.name, incNewExpr))), List(None), false)) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index 4d09d5c7e..88beb4d0b 
100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -15,21 +15,34 @@ import scala.util.Try // ddlog column variable type: constant or variable sealed trait ColumnVariable case class Variable(varName : String, relName : String, index : Int ) extends ColumnVariable -case class Constant(value : String, relName: String, index: Int) extends ColumnVariable -case class InlineFunction(functionName: String, args: List[ColumnVariable], isAggregation: Boolean) extends ColumnVariable + case class Expression(variables: List[ColumnVariable], ops: List[String], relName: String, index: Int) +case class ColumnExpr(expr: Expr, relName: String, index: Int) case class Operator(operator: String, operand: ColumnVariable) -case class Atom(name : String, terms : List[Expression]) +sealed trait Expr +case class VarExpr(name: String) extends Expr +case class ConstExpr(value: String) extends Expr +case class FuncExpr(function: String, args: List[Expr], isAggregation: Boolean) extends Expr +case class BinaryOpExpr(lhs: Expr, op: String, rhs: Expr) extends Expr + +case class Atom(name : String, terms : List[ColumnExpr]) case class Attribute(name : String, terms : List[Variable], types : List[String]) -case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[Option[CompoundCondition]], isDistinct: Boolean) +case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[Option[Cond]], isDistinct: Boolean) case class Column(name : String, t : String) +case class BodyWithCondition(body: List[Atom], condition: Option[Cond]) // condition -case class BodyWithConditions(body: List[Atom], conditions: Option[CompoundCondition]) - -case class Condition(lhs: Expression, op: String, rhs: Expression) -case class CompoundCondition(conditions: List[List[Condition]]) +sealed trait Cond +case class ComparisonCond(lhs: Expr, op: String, rhs: Expr) extends Cond +case class NegationCond(cond: Cond) extends Cond +case class BinaryOpCond(lhs: Cond, op: LogicOperator.LogicOperator, rhs: Cond) extends Cond + +// logic operators +object LogicOperator extends Enumeration { + type LogicOperator = Value + val AND, OR = Value +} // variable type sealed trait VariableType { @@ -66,7 +79,6 @@ case class ExtractionRule(q : ConjunctiveQuery, supervision: String = null) exte case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, semantics : String = "Imply", mode: String = null) extends Statement // Weighted rule - // Parser class DeepDiveLogParser extends JavaTokenParsers { @@ -120,84 +132,74 @@ class DeepDiveLogParser extends JavaTokenParsers { } } - def operator = "||" | "+" | "-" | "*" | "/" | "&" - def castOp = "::" - - def variable = variableName ^^ { Variable(_, "", 0) } - def columnConstant = constant ^^ { Constant(_, "", 0) } - def variableOrConstant = columnConstant | variable + def operator = "||" | "+" | "-" | "*" | "/" | "&" | "::" val aggregationFunctions = Set("MAX", "SUM", "MIN", "ARRAY_ACCUM", "ARRAY_AGG") - def inlineFunction = functionName ~ "(" ~ rep1sep(variableOrConstant, ",") ~ ")" ^^ { - case (name ~ _ ~ args ~ _) => { - if (aggregationFunctions contains name) InlineFunction(name, args, true) - else InlineFunction(name, args, false) - } - } - def columnVariable = columnConstant | inlineFunction | variable - def operateOn = operator ~ columnVariable ^^ { case (v ~ o) => 
Operator(v,o) } - def typecast = castOp ~ columnConstant ^^ { case (v ~ o) => Operator(v,o) } - def operatorAndOperand = operateOn | typecast - def expression = columnVariable ~ rep(operatorAndOperand) ^^ { - case (v ~ opList) => { - val variables = List(v) ++ (opList map (_.operand)) - val ops = opList map (_.operator) - Expression(variables, ops, "", 0) - } - } + + // expressions + def expr : Parser[Expr] = + ( lexpr ~ operator ~ expr ^^ { case (lhs ~ op ~ rhs) => BinaryOpExpr(lhs, op, rhs) } + | lexpr + ) + + def lexpr : Parser[Expr] = + ( functionName ~ "(" ~ rep1sep(expr, ",") ~ ")" ^^ { + case (name ~ _ ~ args ~ _) => FuncExpr(name, args, (aggregationFunctions contains name)) + } + | constant ^^ { ConstExpr(_) } + | variableName ^^ { VarExpr(_) } + | "(" ~> expr <~ ")" + ) // TODO support aggregate function syntax somehow - def cqHead = relationName ~ "(" ~ repsep(expression, ",") ~ ")" ^^ { + def cqHead = relationName ~ "(" ~ rep1sep(expr, ",") ~ ")" ^^ { case (r ~ "(" ~ variableUses ~ ")") => Atom(r, variableUses.zipWithIndex map { - case (Expression(v,op,_,_),i) => { - val vars = v map { - case Variable(x,_,_) => Variable(x,r,i) - case Constant(x,_,_) => Constant(x,r,i) - case InlineFunction(x, args, a) => InlineFunction(x, args, a) - } - Expression(vars, op, r, i) - } + case (e,i) => ColumnExpr(e, r, i) }) } - // TODO add conditional expressions for where clause - def cqConditionalExpr = failure("No conditional expression supported yet") - def cqBodyAtom: Parser[Atom] = - ( relationName ~ "(" ~ repsep(expression, ",") ~ ")" ^^ { - case (r ~ "(" ~ variableBindings ~ ")") => - Atom(r, variableBindings.zipWithIndex map { - case (Expression(v,op,_,_),i) => { - val vars = v map { - case Variable(x,_,_) => Variable(x,r,i) - case Constant(x,_,_) => Constant(x,r,i) - case InlineFunction(x, args, _) => InlineFunction(x, args, false) - } - Expression(vars, op, r, i) - } - }) + // conditional expressions + def compareOperator = "LIKE" | ">" | "<" | ">=" | "<=" | "!=" | "=" | "IS" | "IS NOT" + def cond : Parser[Cond] = + ( acond ~ (";") ~ cond ^^ { case (lhs ~ op ~ rhs) => + BinaryOpCond(lhs, LogicOperator.OR, rhs) } - | cqConditionalExpr + | acond + ) + def acond : Parser[Cond] = + ( lcond ~ (",") ~ acond ^^ { case (lhs ~ op ~ rhs) => BinaryOpCond(lhs, LogicOperator.AND, rhs) } + | lcond + ) + // ! has higher priority... + def lcond : Parser[Cond] = + ( "!" ~> bcond ^^ { NegationCond(_) } + | bcond + ) + def bcond : Parser[Cond] = + ( expr ~ compareOperator ~ expr ^^ { case (lhs ~ op ~ rhs) => + ComparisonCond(lhs, op, rhs) + } + | "[" ~> cond <~ "]" ) - def cqBody: Parser[List[Atom]] = rep1sep(cqBodyAtom, ",") - // conditions - def filterOperator = "LIKE" | ">" | "<" | ">=" | "<=" | "!=" | "=" | "IS" | "IS NOT" - def condition = expression ~ filterOperator ~ expression ^^ { - case (lhs ~ op ~ rhs) => { - Condition(lhs, op, rhs) + def cqBodyAtom: Parser[Atom] = + relationName ~ "(" ~ repsep(expr, ",") ~ ")" ^^ { + case (r ~ "(" ~ variableBindings ~ ")") => + Atom(r, variableBindings.zipWithIndex map { + case (e,i) => ColumnExpr(e, r, i) + }) } - } - def conjunctiveCondition = repsep(condition, ",") - def compoundCondition = opt("[") ~> repsep(conjunctiveCondition, ";") <~ opt("]") ^^ { CompoundCondition(_) } - def cqBodyWithCondition = cqBody ~ ("," ~> compoundCondition).? ^^ { - case (b ~ c) => BodyWithConditions(b, c) + def cqBody: Parser[List[Atom]] = rep1sep(cqBodyAtom, ",") + + def cqBodyWithCondition = cqBody ~ ("," ~> cond).? 
^^ { + case (b ~ c) => BodyWithCondition(b, c) } def conjunctiveQuery : Parser[ConjunctiveQuery] = cqHead ~ opt("*") ~ ":-" ~ rep1sep(cqBodyWithCondition, ";") ^^ { case (headatom ~ isDistinct ~ ":-" ~ disjunctiveBodies) => - ConjunctiveQuery(headatom, disjunctiveBodies.map(_.body), disjunctiveBodies.map(_.conditions), isDistinct != None) - } + ConjunctiveQuery(headatom, disjunctiveBodies.map(_.body), disjunctiveBodies.map(_.condition), isDistinct != None) + } def relationType: Parser[RelationType] = ( "like" ~> relationName ^^ { RelationTypeAlias(_) } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index fe04a460c..a3c41ca23 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -48,39 +48,40 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { |""".stripMargin } - def printVarOrConst(x: ColumnVariable) = { - x match { - case x: Variable => x.varName - case x: Constant => { - if (x.value.startsWith("'")) - s""" "${x.value.stripPrefix("'").stripSuffix("'")}" """ - else - x.value + // print an expression + def printExpr(e: Expr) : String = { + e match { + case VarExpr(name) => name + case ConstExpr(value) => { + if (value.startsWith("'")) s""" "${value.stripPrefix("'").stripSuffix("'")}" """ + else value } - case _ => "" + case FuncExpr(function, args, agg) => { + val resolvedArgs = args map (x => printExpr(x)) + s"${function}(${resolvedArgs.mkString(", ")})" + } + case BinaryOpExpr(lhs, op, rhs) => s"${printExpr(lhs)} ${op} ${printExpr(rhs)}" } } - def printColumnVar(x: ColumnVariable) = { - x match { - case InlineFunction(name, args, _) => { - val resolvedArgs = args map printVarOrConst - s"${name}(${resolvedArgs.mkString(", ")})" + // print a condition + def printCond(cond: Cond) : String = { + cond match { + case ComparisonCond(lhs, op, rhs) => s"${printExpr(lhs)} ${op} ${printExpr(rhs)}" + case NegationCond(c) => s"![${printCond(c)}]" + case BinaryOpCond(lhs, op, rhs) => { + op match { + case LogicOperator.AND => s"[${printCond(lhs)}], [${printCond(rhs)}]" + case LogicOperator.OR => s"[${printCond(lhs)}]; [${printCond(rhs)}]" + } } - case _ => printVarOrConst(x) } } - def printExpr(e: Expression, resolve: ColumnVariable => String) = { - val resolvedVars = e.variables map (resolve(_)) - val rest = ((e.ops zip resolvedVars.drop(1)).map { case (a,b) => s"${a} ${b}" }).mkString(" ") - resolvedVars(0) + (if (rest != "") " " + rest else "") - } - def print(cq: ConjunctiveQuery): String = { val printAtom = {a:Atom => val vars = a.terms map { - case e => printExpr(e, printColumnVar) + case e => printExpr(e.expr) } s"${a.name}(${vars.mkString(", ")})" } @@ -88,20 +89,14 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { s"${(a map printAtom).mkString(",\n ")}" } - val printConjunctiveCondition = {a: List[Condition] => - (a map { case Condition(lhs, op, rhs) => - val lhsExpr = printExpr(lhs, printColumnVar) - val rhsExpr = printExpr(rhs, printColumnVar) - s"${lhsExpr} ${op} ${rhsExpr}" }).mkString(", ") - } val conditionList = cq.conditions map { - case Some(x) => Some(x.conditions map printConjunctiveCondition mkString("; ")) + case Some(x) => Some(printCond(x)) case None => None } val bodyList = cq.bodies map printListAtom val bodyWithCondition = (bodyList zip conditionList map { case(a,b) => b match { - case Some(c) => s"${a}, [ ${c} ]" + case Some(c) => s"${a}, ${c}" case None 
=> a } }).mkString(";\n ") diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected index 090108a90..3ce87caa8 100644 --- a/test/expected-output-test/expressions/compile.expected +++ b/test/expected-output-test/expressions/compile.expected @@ -55,16 +55,24 @@ style: "sql_extractor" } + deepdive.extraction.extractors.extraction_rule_4 { + sql: """ DROP VIEW IF EXISTS P; + CREATE VIEW P AS + SELECT R1.p AS "b.R1.p" , R1.q AS "b.R1.q" , MAX(R1.r) AS column_2 + FROM a R0, b R1 + WHERE R1.k = R0.k + """ + style: "sql_extractor" + + } + + deepdive.extraction.extractors.extraction_rule_3 { sql: """ DROP VIEW IF EXISTS Q; CREATE VIEW Q AS - SELECT DISTINCT 'test' :: TEXT, 123, R0.k AS "a.R0.k" , unnest(R1.q) + SELECT DISTINCT 'test' :: TEXT AS column_0, 123 AS column_1, R0.k AS "a.R0.k" , unnest(R1.q) AS column_3 FROM a R0, b R1, c R2 - WHERE R1.k = R0.k AND R2.s = R1.p || R1.q AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR (R1.r < 20 AND R1.r > 10)) UNION ALL - SELECT R1.p AS "b.R1.p" , R1.q AS "b.R1.q" , MAX(R1.r) AS "MAX_R1.r" - FROM a R0, b R1 - WHERE R1.k = R0.k - GROUP BY R1.p, R1.q + WHERE R1.k = R0.k AND R2.s = R1.p || R1.q AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR ((NOT (R1.r < 20)) AND (R1.r < 50))) """ style: "sql_extractor" @@ -72,6 +80,6 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2] -deepdive.pipeline.pipelines.extraction: [extraction_rule_3] -deepdive.pipeline.pipelines.endtoend: [extraction_rule_3] +deepdive.pipeline.pipelines.extraction: [extraction_rule_4, extraction_rule_3] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_4, extraction_rule_3] deepdive.pipeline.pipelines.cleanup: [cleanup] diff --git a/test/expected-output-test/expressions/input.ddl b/test/expected-output-test/expressions/input.ddl index 0795e99bb..f0ba156c5 100644 --- a/test/expected-output-test/expressions/input.ddl +++ b/test/expected-output-test/expressions/input.ddl @@ -2,6 +2,6 @@ a(k int). b(k int, p text, q text, r int). c(s text, n int, t text). -Q("test" :: TEXT, 123, id, unnest(y)) * :- a(id), b(id, x,y,z), c(x || y,10,"foo"), [z>100; z < 20, z > 10]. +Q("test" :: TEXT, 123, id, unnest(y)) * :- a(id), b(id, x,y,z), c(x || y,10,"foo"), [z>100; !z < 20, z < 50]. -Q(y, z, MAX(w)) :- a(x), b(x,y,z,w). \ No newline at end of file +P(y, z, MAX(w)) :- a(x), b(x,y,z,w). \ No newline at end of file diff --git a/test/expected-output-test/expressions/print.expected b/test/expected-output-test/expressions/print.expected index f6312d0b2..97d9a9859 100644 --- a/test/expected-output-test/expressions/print.expected +++ b/test/expected-output-test/expressions/print.expected @@ -12,9 +12,9 @@ c(s text, Q( "test" :: TEXT, 123, id, unnest(y)) * :- a(id), b(id, x, y, z), - c(x || y, 10, "foo" ), [ z > 100; z < 20, z > 10 ]. + c(x || y, 10, "foo" ), [z > 100]; [[![z < 20]], [z < 50]]. -Q(y, z, MAX(w)) :- +P(y, z, MAX(w)) :- a(x), b(x, y, z, w). 
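One design note on the grammar introduced in this commit: cond, acond, lcond, and bcond form a precedence ladder, each rule owning one level and delegating tighter-binding forms to the rule below, which is why ";" (OR) binds loosest, "," (AND) tighter, and "!" tightest. A toy, runnable illustration of the same layering idiom (not the committed grammar; it only assumes scala-parser-combinators, which this repo already uses):

    import scala.util.parsing.combinator.JavaTokenParsers

    object CondSketch extends JavaTokenParsers {
      sealed trait B
      case class Or(l: B, r: B) extends B
      case class And(l: B, r: B) extends B
      case class Not(b: B) extends B
      case class Leaf(s: String) extends B

      // one rule per precedence level, right-recursive like the committed grammar
      def cond: Parser[B]  = acond ~ opt(";" ~> cond)  ^^ { case l ~ r => r.fold(l)(Or(l, _)) }
      def acond: Parser[B] = lcond ~ opt("," ~> acond) ^^ { case l ~ r => r.fold(l)(And(l, _)) }
      def lcond: Parser[B] = "!" ~> bcond ^^ { Not(_) } | bcond
      def bcond: Parser[B] = "[" ~> cond <~ "]" | ident ^^ { Leaf(_) }

      def main(args: Array[String]): Unit =
        // parses as Or(Leaf(p), And(Not(Leaf(q)), Leaf(r))), i.e. p OR (NOT q AND r)
        println(parseAll(cond, "p; !q, r"))
    }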
From 199b7e30cc6be3e1a4d1cc03dccb04095eaa033b Mon Sep 17 00:00:00 2001
From: Feiran Wang
Date: Fri, 10 Jul 2015 14:30:12 -0700
Subject: [PATCH 163/347] Add trailing "/" to ${APP_HOME}, remove leading "/"
 in udf

---
 examples/chunking.ddl                              |  4 +--
 examples/spouse_example.ddl                        |  6 ++--
 .../deepdive/ddlog/DeepDiveLogCompiler.scala       |  2 +-
 .../spouse_example/print-incremental.expected      | 28 ++++++++---------
 .../spouse_example/print.expected                  | 16 +++++-----
 .../spouse_example_new_feature/input.ddl           |  6 ++--
 .../print-incremental.expected                     | 28 ++++++++---------
 .../spouse_example_new_inference/input.ddl         |  6 ++--
 .../print-incremental.expected                     | 30 +++++++++----------
 .../print.expected                                 | 18 +++++------
 10 files changed, 72 insertions(+), 72 deletions(-)

diff --git a/examples/chunking.ddl b/examples/chunking.ddl
index 1ef26623d..f73307681 100644
--- a/examples/chunking.ddl
+++ b/examples/chunking.ddl
@@ -23,7 +23,7 @@ tag?(word_id bigint) Categorical(13).
 
 function ext_training over like words_raw
                      returns like words
-  implementation "/udf/ext_training.py" handles tsv lines.
+  implementation "udf/ext_training.py" handles tsv lines.
 
 words :- !ext_training(words_raw).
 
@@ -34,7 +34,7 @@ ext_features_input(word_id1, word1, pos1, word2, pos2) :-
 
 function ext_features over like ext_features_input
                      returns like word_features
-  implementation "/udf/ext_features.py" handles tsv lines.
+  implementation "udf/ext_features.py" handles tsv lines.
 
 word_features :- !ext_features(ext_features_input).
diff --git a/examples/spouse_example.ddl b/examples/spouse_example.ddl
index b6fd72021..040545fce 100644
--- a/examples/spouse_example.ddl
+++ b/examples/spouse_example.ddl
@@ -42,7 +42,7 @@ ext_people_input(s, words, ner_tags) :-
 
 function ext_people over like ext_people_input
                  returns like people_mentions
-  implementation "/udf/ext_people.py" handles tsv lines.
+  implementation "udf/ext_people.py" handles tsv lines.
 
 has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input).
@@ -53,7 +53,7 @@ ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :-
 
 function ext_has_spouse over like ext_has_spouse_input
                     returns like has_spouse_candidates
-  implementation "/udf/ext_has_spouse.py" handles tsv lines.
+  implementation "udf/ext_has_spouse.py" handles tsv lines.
 
 has_spouse_features :- !ext_has_spouse_features(ext_has_spouse_features_input).
@@ -66,7 +66,7 @@ ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :-
 
 function ext_has_spouse_features over like ext_has_spouse_features_input
                              returns like has_spouse_features
-  implementation "/udf/ext_has_spouse_features.py" handles tsv lines.
+  implementation "udf/ext_has_spouse_features.py" handles tsv lines.
 
 has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l.
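The compiler change below and the .ddl changes above are two halves of one fix: the path separator moves out of every implementation string and into the compiled config template, so a well-formed app compiles to the same extractor block either way. Roughly (quoting the template shown in the compiler diff; exact compiled output not reproduced here):

    # .ddl declares:      implementation "udf/ext_people.py" handles tsv lines.
    # compiler now emits: udf: ${APP_HOME}"/udf/ext_people.py"
    # (previously the "/" had to be written in each .ddl path instead)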
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index cdb9521ac..cf90af4a7 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -518,7 +518,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { val function = ss.resolveFunctionName(stmt.function) val udfDetails = (function.implementations collectFirst { case impl: RowWiseLineHandler => - s"""udf: $${APP_HOME}\"${StringEscapeUtils.escapeJava(impl.command)}\" + s"""udf: $${APP_HOME}\"/${StringEscapeUtils.escapeJava(impl.command)}\" style: \"${impl.format}_extractor\" """ }) diff --git a/test/expected-output-test/spouse_example/print-incremental.expected b/test/expected-output-test/spouse_example/print-incremental.expected index d552faf9f..08a87f0a6 100644 --- a/test/expected-output-test/spouse_example/print-incremental.expected +++ b/test/expected-output-test/spouse_example/print-incremental.expected @@ -7,7 +7,7 @@ dd_delta_articles(article_id text, dd_new_articles(article_id text, text text). -dd_new_articles(article_id, text) :- +dd_new_articles(article_id, text) :- articles(article_id, text); dd_delta_articles(article_id, text). @@ -41,7 +41,7 @@ dd_new_sentences(document_id text, sentence_offset int, sentence_id text). -dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) :- +dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) :- sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id); dd_delta_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id). @@ -63,7 +63,7 @@ dd_new_people_mentions(sentence_id text, text text, mention_id text). -dd_new_people_mentions(sentence_id, start_position, length, text, mention_id) :- +dd_new_people_mentions(sentence_id, start_position, length, text, mention_id) :- people_mentions(sentence_id, start_position, length, text, mention_id); dd_delta_people_mentions(sentence_id, start_position, length, text, mention_id). @@ -88,7 +88,7 @@ dd_new_has_spouse_candidates(person1_id text, relation_id text, is_true boolean). -dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true) :- +dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true) :- has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true); dd_delta_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true). @@ -101,7 +101,7 @@ dd_delta_has_spouse_features(relation_id text, dd_new_has_spouse_features(relation_id text, feature text). -dd_new_has_spouse_features(relation_id, feature) :- +dd_new_has_spouse_features(relation_id, feature) :- has_spouse_features(relation_id, feature); dd_delta_has_spouse_features(relation_id, feature). @@ -111,24 +111,24 @@ dd_delta_has_spouse?(relation_id text). dd_new_has_spouse?(relation_id text). -dd_new_has_spouse(relation_id) :- +dd_new_has_spouse(relation_id) :- has_spouse(relation_id); dd_delta_has_spouse(relation_id). dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). -dd_delta_ext_people_input(s, words, ner_tags) :- +dd_delta_ext_people_input(s, words, ner_tags) :- dd_delta_sentences(a, b, words, c, d, e, ner_tags, f, s). 
function ext_people over like dd_delta_ext_people_input returns like dd_delta_people_mentions - implementation "/udf/ext_people.py" + implementation "udf/ext_people.py" handles tsv lines. dd_delta_has_spouse_candidates :- !ext_has_spouse(dd_delta_ext_has_spouse_input). -dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- +dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- dd_delta_people_mentions(s, a, b, p1_text, p1_id), people_mentions(s, c, d, p2_text, p2_id); dd_new_people_mentions(s, a, b, p1_text, p1_id), @@ -137,12 +137,12 @@ dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like dd_delta_ext_has_spouse_input returns like dd_delta_has_spouse_candidates - implementation "/udf/ext_has_spouse.py" + implementation "udf/ext_has_spouse.py" handles tsv lines. dd_delta_has_spouse_features :- !ext_has_spouse_features(dd_delta_ext_has_spouse_features_input). -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), @@ -163,14 +163,14 @@ dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) : function ext_has_spouse_features over like dd_delta_ext_has_spouse_features_input returns like dd_delta_has_spouse_features - implementation "/udf/ext_has_spouse_features.py" + implementation "udf/ext_has_spouse_features.py" handles tsv lines. -dd_delta_has_spouse(rid) :- +dd_delta_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l) label = l. -dd_new_has_spouse(rid) :- +dd_new_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f); dd_new_has_spouse_candidates(a, b, c, d, rid, l), diff --git a/test/expected-output-test/spouse_example/print.expected b/test/expected-output-test/spouse_example/print.expected index 0827f99cf..db505c038 100644 --- a/test/expected-output-test/spouse_example/print.expected +++ b/test/expected-output-test/spouse_example/print.expected @@ -31,30 +31,30 @@ has_spouse?(relation_id text). people_mentions :- !ext_people(ext_people_input). -ext_people_input(s, words, ner_tags) :- +ext_people_input(s, words, ner_tags) :- sentences(a, b, words, c, d, e, ner_tags, f, s). function ext_people over like ext_people_input returns like people_mentions - implementation "/udf/ext_people.py" + implementation "udf/ext_people.py" handles tsv lines. has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input). -ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- +ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- people_mentions(s, a, b, p1_text, p1_id), people_mentions(s, c, d, p2_text, p2_id). function ext_has_spouse over like ext_has_spouse_input returns like has_spouse_candidates - implementation "/udf/ext_has_spouse.py" + implementation "udf/ext_has_spouse.py" handles tsv lines. has_spouse_features :- !ext_has_spouse_features(ext_has_spouse_features_input). 
-ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- +ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), @@ -63,14 +63,14 @@ ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features - implementation "/udf/ext_has_spouse_features.py" + implementation "udf/ext_has_spouse_features.py" handles tsv lines. -has_spouse(rid) :- +has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l. -has_spouse(rid) :- +has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f diff --git a/test/expected-output-test/spouse_example_new_feature/input.ddl b/test/expected-output-test/spouse_example_new_feature/input.ddl index f5c1bf625..4e5711eea 100644 --- a/test/expected-output-test/spouse_example_new_feature/input.ddl +++ b/test/expected-output-test/spouse_example_new_feature/input.ddl @@ -42,7 +42,7 @@ ext_people_input(s, words, ner_tags) :- function ext_people over like ext_people_input returns like people_mentions - implementation "/udf/ext_people.py" handles tsv lines + implementation "udf/ext_people.py" handles tsv lines mode = inc. has_spouse_candidates :- @@ -54,7 +54,7 @@ ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like ext_has_spouse_input returns like has_spouse_candidates - implementation "/udf/ext_has_spouse.py" handles tsv lines. + implementation "udf/ext_has_spouse.py" handles tsv lines. has_spouse_features :- !ext_has_spouse_features(ext_has_spouse_features_input). @@ -67,7 +67,7 @@ ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features - implementation "/udf/ext_has_spouse_features.py" handles tsv lines. + implementation "udf/ext_has_spouse_features.py" handles tsv lines. has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l. diff --git a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected index edc9e85c5..8ebd7d939 100644 --- a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected +++ b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected @@ -7,7 +7,7 @@ dd_delta_articles(article_id text, dd_new_articles(article_id text, text text). -dd_new_articles(article_id, text) :- +dd_new_articles(article_id, text) :- articles(article_id, text); dd_delta_articles(article_id, text). @@ -41,7 +41,7 @@ dd_new_sentences(document_id text, sentence_offset int, sentence_id text). -dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) :- +dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) :- sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id); dd_delta_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id). @@ -63,7 +63,7 @@ dd_new_people_mentions(sentence_id text, text text, mention_id text). 
-dd_new_people_mentions(sentence_id, start_position, length, text, mention_id) :- +dd_new_people_mentions(sentence_id, start_position, length, text, mention_id) :- people_mentions(sentence_id, start_position, length, text, mention_id); dd_delta_people_mentions(sentence_id, start_position, length, text, mention_id). @@ -88,7 +88,7 @@ dd_new_has_spouse_candidates(person1_id text, relation_id text, is_true boolean). -dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true) :- +dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true) :- has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true); dd_delta_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true). @@ -101,7 +101,7 @@ dd_delta_has_spouse_features(relation_id text, dd_new_has_spouse_features(relation_id text, feature text). -dd_new_has_spouse_features(relation_id, feature) :- +dd_new_has_spouse_features(relation_id, feature) :- has_spouse_features(relation_id, feature); dd_delta_has_spouse_features(relation_id, feature). @@ -111,25 +111,25 @@ dd_delta_has_spouse?(relation_id text). dd_new_has_spouse?(relation_id text). -dd_new_has_spouse(relation_id) :- +dd_new_has_spouse(relation_id) :- has_spouse(relation_id); dd_delta_has_spouse(relation_id). dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). -dd_delta_ext_people_input(s, words, ner_tags) :- +dd_delta_ext_people_input(s, words, ner_tags) :- sentences(a, b, words, c, d, e, ner_tags, f, s); dd_delta_sentences(a, b, words, c, d, e, ner_tags, f, s). function ext_people over like dd_delta_ext_people_input returns like dd_delta_people_mentions - implementation "/udf/ext_people.py" + implementation "udf/ext_people.py" handles tsv lines mode = inc. dd_delta_has_spouse_candidates :- !ext_has_spouse(dd_delta_ext_has_spouse_input). -dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- +dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- dd_delta_people_mentions(s, a, b, p1_text, p1_id), people_mentions(s, c, d, p2_text, p2_id); dd_new_people_mentions(s, a, b, p1_text, p1_id), @@ -138,12 +138,12 @@ dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like dd_delta_ext_has_spouse_input returns like dd_delta_has_spouse_candidates - implementation "/udf/ext_has_spouse.py" + implementation "udf/ext_has_spouse.py" handles tsv lines. dd_delta_has_spouse_features :- !ext_has_spouse_features(dd_delta_ext_has_spouse_features_input). -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), @@ -164,14 +164,14 @@ dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) : function ext_has_spouse_features over like dd_delta_ext_has_spouse_features_input returns like dd_delta_has_spouse_features - implementation "/udf/ext_has_spouse_features.py" + implementation "udf/ext_has_spouse_features.py" handles tsv lines. -dd_delta_has_spouse(rid) :- +dd_delta_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l) label = l. 
-dd_new_has_spouse(rid) :- +dd_new_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f); dd_new_has_spouse_candidates(a, b, c, d, rid, l), diff --git a/test/expected-output-test/spouse_example_new_inference/input.ddl b/test/expected-output-test/spouse_example_new_inference/input.ddl index e6eb50fce..08e9b910e 100644 --- a/test/expected-output-test/spouse_example_new_inference/input.ddl +++ b/test/expected-output-test/spouse_example_new_inference/input.ddl @@ -42,7 +42,7 @@ ext_people_input(s, words, ner_tags) :- function ext_people over like ext_people_input returns like people_mentions - implementation "/udf/ext_people.py" handles tsv lines. + implementation "udf/ext_people.py" handles tsv lines. has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input). @@ -53,7 +53,7 @@ ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like ext_has_spouse_input returns like has_spouse_candidates - implementation "/udf/ext_has_spouse.py" handles tsv lines. + implementation "udf/ext_has_spouse.py" handles tsv lines. has_spouse_features :- !ext_has_spouse_features(ext_has_spouse_features_input). @@ -66,7 +66,7 @@ ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features - implementation "/udf/ext_has_spouse_features.py" handles tsv lines. + implementation "udf/ext_has_spouse_features.py" handles tsv lines. has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l. diff --git a/test/expected-output-test/spouse_example_new_inference/print-incremental.expected b/test/expected-output-test/spouse_example_new_inference/print-incremental.expected index baab6b961..3f1ab2e4a 100644 --- a/test/expected-output-test/spouse_example_new_inference/print-incremental.expected +++ b/test/expected-output-test/spouse_example_new_inference/print-incremental.expected @@ -7,7 +7,7 @@ dd_delta_articles(article_id text, dd_new_articles(article_id text, text text). -dd_new_articles(article_id, text) :- +dd_new_articles(article_id, text) :- articles(article_id, text); dd_delta_articles(article_id, text). @@ -41,7 +41,7 @@ dd_new_sentences(document_id text, sentence_offset int, sentence_id text). -dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) :- +dd_new_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id) :- sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id); dd_delta_sentences(document_id, sentence, words, lemma, pos_tags, dependencies, ner_tags, sentence_offset, sentence_id). @@ -63,7 +63,7 @@ dd_new_people_mentions(sentence_id text, text text, mention_id text). -dd_new_people_mentions(sentence_id, start_position, length, text, mention_id) :- +dd_new_people_mentions(sentence_id, start_position, length, text, mention_id) :- people_mentions(sentence_id, start_position, length, text, mention_id); dd_delta_people_mentions(sentence_id, start_position, length, text, mention_id). @@ -88,7 +88,7 @@ dd_new_has_spouse_candidates(person1_id text, relation_id text, is_true boolean). 
-dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true) :- +dd_new_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true) :- has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true); dd_delta_has_spouse_candidates(person1_id, person2_id, sentence_id, description, relation_id, is_true). @@ -101,7 +101,7 @@ dd_delta_has_spouse_features(relation_id text, dd_new_has_spouse_features(relation_id text, feature text). -dd_new_has_spouse_features(relation_id, feature) :- +dd_new_has_spouse_features(relation_id, feature) :- has_spouse_features(relation_id, feature); dd_delta_has_spouse_features(relation_id, feature). @@ -111,24 +111,24 @@ dd_delta_has_spouse?(relation_id text). dd_new_has_spouse?(relation_id text). -dd_new_has_spouse(relation_id) :- +dd_new_has_spouse(relation_id) :- has_spouse(relation_id); dd_delta_has_spouse(relation_id). dd_delta_people_mentions :- !ext_people(dd_delta_ext_people_input). -dd_delta_ext_people_input(s, words, ner_tags) :- +dd_delta_ext_people_input(s, words, ner_tags) :- dd_delta_sentences(a, b, words, c, d, e, ner_tags, f, s). function ext_people over like dd_delta_ext_people_input returns like dd_delta_people_mentions - implementation "/udf/ext_people.py" + implementation "udf/ext_people.py" handles tsv lines. dd_delta_has_spouse_candidates :- !ext_has_spouse(dd_delta_ext_has_spouse_input). -dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- +dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- dd_delta_people_mentions(s, a, b, p1_text, p1_id), people_mentions(s, c, d, p2_text, p2_id); dd_new_people_mentions(s, a, b, p1_text, p1_id), @@ -137,12 +137,12 @@ dd_delta_ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- function ext_has_spouse over like dd_delta_ext_has_spouse_input returns like dd_delta_has_spouse_candidates - implementation "/udf/ext_has_spouse.py" + implementation "udf/ext_has_spouse.py" handles tsv lines. dd_delta_has_spouse_features :- !ext_has_spouse_features(dd_delta_ext_has_spouse_features_input). -dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- +dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- dd_delta_sentences(a, b, words, c, d, e, f, g, s), has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), @@ -163,14 +163,14 @@ dd_delta_ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) : function ext_has_spouse_features over like dd_delta_ext_has_spouse_features_input returns like dd_delta_has_spouse_features - implementation "/udf/ext_has_spouse_features.py" + implementation "udf/ext_has_spouse_features.py" handles tsv lines. -dd_delta_has_spouse(rid) :- +dd_delta_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l) label = l. -dd_new_has_spouse(rid) :- +dd_new_has_spouse(rid) :- dd_delta_has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f); dd_new_has_spouse_candidates(a, b, c, d, rid, l), @@ -178,7 +178,7 @@ dd_new_has_spouse(rid) :- weight = f semantics = Imply. 
-dd_new_has_spouse(rid) :- +dd_new_has_spouse(rid) :- dd_new_has_spouse(rid2), dd_new_has_spouse_candidates(a1, b1, c1, d1, rid, l1), dd_new_has_spouse_candidates(b1, a1, c2, d2, rid2, l2) diff --git a/test/expected-output-test/spouse_example_new_inference/print.expected b/test/expected-output-test/spouse_example_new_inference/print.expected index 4e8b107bd..3aefbc215 100644 --- a/test/expected-output-test/spouse_example_new_inference/print.expected +++ b/test/expected-output-test/spouse_example_new_inference/print.expected @@ -31,30 +31,30 @@ has_spouse?(relation_id text). people_mentions :- !ext_people(ext_people_input). -ext_people_input(s, words, ner_tags) :- +ext_people_input(s, words, ner_tags) :- sentences(a, b, words, c, d, e, ner_tags, f, s). function ext_people over like ext_people_input returns like people_mentions - implementation "/udf/ext_people.py" + implementation "udf/ext_people.py" handles tsv lines. has_spouse_candidates :- !ext_has_spouse(ext_has_spouse_input). -ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- +ext_has_spouse_input(s, p1_id, p1_text, p2_id, p2_text) :- people_mentions(s, a, b, p1_text, p1_id), people_mentions(s, c, d, p2_text, p2_id). function ext_has_spouse over like ext_has_spouse_input returns like has_spouse_candidates - implementation "/udf/ext_has_spouse.py" + implementation "udf/ext_has_spouse.py" handles tsv lines. has_spouse_features :- !ext_has_spouse_features(ext_has_spouse_features_input). -ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- +ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- sentences(a, b, words, c, d, e, f, g, s), has_spouse_candidates(person1_id, person2_id, s, h, rid, x), people_mentions(s, p1idx, p1len, k, person1_id), @@ -63,20 +63,20 @@ ext_has_spouse_features_input(words, rid, p1idx, p1len, p2idx, p2len) :- function ext_has_spouse_features over like ext_has_spouse_features_input returns like has_spouse_features - implementation "/udf/ext_has_spouse_features.py" + implementation "udf/ext_has_spouse_features.py" handles tsv lines. -has_spouse(rid) :- +has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l) label = l. -has_spouse(rid) :- +has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l), has_spouse_features(rid, f) weight = f semantics = Imply. -has_spouse(rid) :- +has_spouse(rid) :- has_spouse(rid2), has_spouse_candidates(a1, b1, c1, d1, rid, l1), has_spouse_candidates(b1, a1, c2, d2, rid2, l2) From 0088ddcffce269e26b9c38d8bf67eecd4798036b Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Fri, 10 Jul 2015 16:00:54 -0700 Subject: [PATCH 164/347] Refactoring 1. Rename some functions, classes and variables 2. Remove unused code in parser 3. Change bracket style 4. Better where clause generation 5. Use unified expression in statements 6. 
Type cast --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 69 ++++++++++--------- .../ddlog/DeepDiveLogDeltaDeriver.scala | 30 +++----- .../ddlog/DeepDiveLogMergeDeriver.scala | 12 +--- .../deepdive/ddlog/DeepDiveLogParser.scala | 35 ++++------ .../ddlog/DeepDiveLogPrettyPrinter.scala | 14 ++-- .../ddlog/DeepDiveLogSemanticChecker.scala | 6 +- test/compile-error-test.bats.template | 4 +- .../expressions/compile.expected | 2 +- .../expressions/print.expected | 2 +- 9 files changed, 72 insertions(+), 102 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index cf90af4a7..374145b86 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -114,8 +114,8 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C mode = config.mode statements.foreach { case SchemaDeclaration(Attribute(r, terms, types), isQuery, vType) => { - terms.foreach { - case Variable(n,r,i) => + terms.zipWithIndex.foreach { + case (VarExpr(n), i) => schema += { (r,i) -> n } ground_relations += { r -> !isQuery } // record whether a query or a ground term. if (isQuery) variableType += { r -> vType.get } @@ -189,7 +189,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } // resolve an expression - def resolveExpr(e: Expr, cq: ConjunctiveQuery, alias: AliasStyle, index : Int, isHead: Boolean) : String = { + def compileExpr(e: Expr, cq: ConjunctiveQuery, alias: AliasStyle, index : Int, isHead: Boolean) : String = { def recurse(e: Expr, alias: AliasStyle, depth: Int) : String = { // for columns without a name (constant, function call, binary operator), add an column index alias if necessary val columnAlias = if (depth == 0 && isHead) s" AS column_${index}" else "" @@ -212,17 +212,17 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C } // resolve a condition - def resolveCond(cond: Cond, cq: ConjunctiveQuery) : String = { + def compileCond(cond: Cond, cq: ConjunctiveQuery) : String = { cond match { case ComparisonCond(lhs, op, rhs) => - s"${resolveExpr(lhs, cq, OriginalOnly, 0, false)} ${op} ${resolveExpr(rhs, cq, OriginalOnly, 0, false)}" - case NegationCond(c) => s"NOT (${resolveCond(c, cq)})" - case BinaryOpCond(lhs, op, rhs) => { - val resolvedLhs = s"${resolveCond(lhs, cq)}" - val resolvedRhs = s"${resolveCond(rhs, cq)}" + s"${compileExpr(lhs, cq, OriginalOnly, 0, false)} ${op} ${compileExpr(rhs, cq, OriginalOnly, 0, false)}" + case NegationCond(c) => s"NOT (${compileCond(c, cq)})" + case CompoundCond(lhs, op, rhs) => { + val resolvedLhs = s"${compileCond(lhs, cq)}" + val resolvedRhs = s"${compileCond(rhs, cq)}" op match { - case LogicOperator.AND => s"(${resolvedLhs}) AND (${resolvedRhs})" - case LogicOperator.OR => s"(${resolvedLhs}) OR (${resolvedRhs})" + case LogicOperator.AND => s"(${resolvedLhs} AND ${resolvedRhs})" + case LogicOperator.OR => s"(${resolvedLhs} OR ${resolvedRhs})" } } } @@ -238,9 +238,9 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C var whereClause = z.bodies(0).zipWithIndex flatMap { case (Atom(relName, terms),bodyIndex) => { - terms flatMap { case ColumnExpr(expr, relName, index) => + terms.zipWithIndex flatMap { case (expr, index) => expr match { - // a simple variable + // a simple variable indicates a join condition with other columns having the same variable name case VarExpr(varName) => { 
val canonical_body_index = qs.getBodyIndex(varName) if (canonical_body_index != bodyIndex) { @@ -249,9 +249,9 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C Some(s"R${ bodyIndex }.${ real_attr_name1 } = R${ canonical_body_index }.${ real_attr_name2 } ") } else { None } } - // other expressions + // other expressions indicate a filter condition on the column case _ => { - val resolved = resolveExpr(expr, z, OriginalOnly, index, false) + val resolved = compileExpr(expr, z, OriginalOnly, index, false) val attr = schema(relName, index) Some(s"R${bodyIndex}.${attr} = ${resolved}") } @@ -262,16 +262,16 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // resolve conditions val conditionStr = z.conditions(0) match { - case Some(c) => resolveCond(c, z) + case Some(c) => compileCond(c, z) case None => "" } // handle group by // map head terms, leaving out aggregation functions - val groupbyTerms = z.head.terms map { case ColumnExpr(expr, relName, index) => + val groupbyTerms = z.head.terms.zipWithIndex map { case (expr, index) => expr match { case FuncExpr(f, args, agg) => if (agg) None else Some("") - case _ => Some(resolveExpr(expr, z, OriginalOnly, index, false)) + case _ => Some(compileExpr(expr, z, OriginalOnly, index, false)) } } @@ -281,9 +281,10 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C s"\n GROUP BY ${groupbyTerms.mkString(", ")}" } - var whereClauseStr = (whereClause match { - case Nil => if (conditionStr == "") "" else s"WHERE ${conditionStr}" - case _ => s"""WHERE ${whereClause.mkString(" AND ")} ${if (conditionStr == "") "" else s" AND (${conditionStr})"}""" + val whereClauseStrBody = List(whereClause.mkString(" AND "), conditionStr).filter(_ != "").mkString(" AND ") + val whereClauseStr = (whereClauseStrBody match { + case "" => "" + case _ => "WHERE " + whereClauseStrBody }) + groupbyStr s"""FROM ${ bodyNames } @@ -318,8 +319,8 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // Analyze the block visibility among statements def analyzeVisible(statements: List[Statement]) = { - extractionRuleGroupByHead foreach {keyVal => visible += keyVal._2(0)} - functionCallList foreach { stmt => visible += stmt } + extractionRuleGroupByHead.values foreach { value => visible += value(0) } + functionCallList foreach { visible += _ } } // Analyze the dependency between statements and construct a graph. @@ -360,11 +361,11 @@ class QuerySchema(q : ConjunctiveQuery) { def generateCanonicalVar() = { q.bodies(0).zipWithIndex.foreach { case (Atom(relName,terms),index) => { - terms.foreach { case ColumnExpr(expr, r, i) => + terms.zipWithIndex.foreach { case (expr, i) => expr match { case VarExpr(v) => if (! 
(query_schema contains v) ) - query_schema += { v -> (index, Variable(v,r,i) ) } + query_schema += { v -> (index, Variable(v,relName,i) ) } case _ => } } @@ -390,8 +391,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { for (stmt <- stmts) { if ((stmt.a.name startsWith "dd_new_") && (ss.inferenceRuleGroupByHead contains stmt.a.name)) { } else { - var columnDecls = stmt.a.terms map { - case Variable(name, _, i) => s"${name} ${stmt.a.types(i)}" + var columnDecls = stmt.a.terms.zipWithIndex map { + case (VarExpr(name),i) => s"${name} ${stmt.a.types(i)}" } if (stmt.isQuery) { val labelColumn = stmt.variableType match { @@ -440,8 +441,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { if (stmt.supervision != null) { if (stmt.q.bodies.length > 1) ss.error(s"Scoping rule does not allow disjunction.\n") - val headTerms = tmpCq.head.terms map { case ColumnExpr(expr, _, index) => - ss.resolveExpr(expr, tmpCq, OriginalOnly, index, true) + val headTerms = tmpCq.head.terms.zipWithIndex map { case (expr, index) => + ss.compileExpr(expr, tmpCq, OriginalOnly, index, true) } val index = qs.getBodyIndex(stmt.supervision) val name = ss.resolveName(qs.getVar(stmt.supervision)) @@ -451,8 +452,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { ${ ss.generateSQLBody(tmpCq) } """ } else if ((ss.schemaDeclarationGroupByHead contains stmt.q.head.name) && (ss.schemaDeclarationGroupByHead(stmt.q.head.name)(0).isQuery) && (stmt.q.head.name startsWith "dd_new_")) { - val headTerms = tmpCq.head.terms map { case ColumnExpr(expr, _, index) => - ss.resolveExpr(expr, tmpCq, OriginalOnly, index, true) + val headTerms = tmpCq.head.terms.zipWithIndex map { case (expr, index) => + ss.compileExpr(expr, tmpCq, OriginalOnly, index, true) } val headTermsStr = ( headTerms :+ "id" ).mkString(", ") inputQueries += s"""SELECT DISTINCT ${ headTermsStr }, label @@ -469,8 +470,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { case true => OriginalOnly case false => OriginalAndAlias } - val variableCols = tmpCq.head.terms map { case ColumnExpr(expr, _, index) => - ss.resolveExpr(expr, tmpCq, resolveColumnFlag, index, true) + val variableCols = tmpCq.head.terms.zipWithIndex map { case (expr, index) => + ss.compileExpr(expr, tmpCq, resolveColumnFlag, index, true) } val selectStr = variableCols.mkString(", ") val distinctStr = if (tmpCq.isDistinct) "DISTINCT" else "" @@ -639,7 +640,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler { var keys = new ListBuffer[String]() for (stmt <- (ss.schemaDeclarationGroupByHead map (_._2)).flatten) { var columnNames = stmt.a.terms map { - case Variable(name, _, i) => name + case VarExpr(name) => name } if (stmt.isQuery) keys += s"""${stmt.a.name} : [${columnNames.mkString(", ")}]""" } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala index d250fcbc2..ad96f33d3 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala @@ -23,13 +23,11 @@ object DeepDiveLogDeltaDeriver{ // New head val incCqHead = if (isInference) { cq.head.copy( - name = newPrefix + cq.head.name, - terms = cq.head.terms + name = newPrefix + cq.head.name ) } else { cq.head.copy( - name = deltaPrefix + cq.head.name, - terms = cq.head.terms + name = deltaPrefix + cq.head.name ) } @@ -40,15 +38,13 @@ object DeepDiveLogDeltaDeriver{ // Delta body val incDeltaBody = body map { a => a.copy( - name = deltaPrefix + a.name, 
- terms = a.terms + name = deltaPrefix + a.name ) } // New body val incNewBody = body map { a => a.copy( - name = newPrefix + a.name, - terms = a.terms + name = newPrefix + a.name ) } var i = 0 @@ -87,9 +83,7 @@ object DeepDiveLogDeltaDeriver{ // Delta table var incDeltaStmt = stmt.copy( a = stmt.a.copy( - name = deltaPrefix + stmt.a.name, - terms = stmt.a.terms map {term => term.copy(relName = deltaPrefix + term.relName)}, - types = stmt.a.types + name = deltaPrefix + stmt.a.name ) ) incrementalStatement += incDeltaStmt @@ -97,23 +91,15 @@ object DeepDiveLogDeltaDeriver{ // New table var incNewStmt = stmt.copy( a = stmt.a.copy( - name = newPrefix + stmt.a.name, - terms = stmt.a.terms map {term => term.copy(relName = newPrefix + term.relName)}, - types = stmt.a.types + name = newPrefix + stmt.a.name ) ) incrementalStatement += incNewStmt - // from schema declaration to expressions - def variableToExpr(v: Variable) = ColumnExpr(VarExpr(v.varName), v.relName, v.index) - val originalExpr = stmt.a.terms map variableToExpr - val incDeltaExpr = incDeltaStmt.a.terms map variableToExpr - val incNewExpr = incNewStmt.a.terms map variableToExpr - // if (!stmt.isQuery) { incrementalStatement += ExtractionRule(ConjunctiveQuery( - Atom(incNewStmt.a.name, incNewExpr), - List(List(Atom(stmt.a.name, originalExpr)), List(Atom(incDeltaStmt.a.name, incDeltaExpr))), + Atom(incNewStmt.a.name, incNewStmt.a.terms), + List(List(Atom(stmt.a.name, stmt.a.terms)), List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms))), List(None, None), false)) // } incrementalStatement.toList diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala index cbc73c0c3..3a8567688 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala @@ -13,18 +13,12 @@ object DeepDiveLogMergeDeriver{ // New table var incNewStmt = stmt.copy( a = stmt.a.copy( - name = newPrefix + stmt.a.name, - terms = stmt.a.terms map {term => term.copy(relName = newPrefix + term.relName)}, - types = stmt.a.types + name = newPrefix + stmt.a.name ) ) - def variableToExpr(v: Variable) = ColumnExpr(VarExpr(v.varName), v.relName, v.index) - val originalExpr = stmt.a.terms map variableToExpr - val incNewExpr = incNewStmt.a.terms map variableToExpr - - ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, originalExpr), - List(List(Atom(incNewStmt.a.name, incNewExpr))), List(None), false)) + ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, stmt.a.terms), + List(List(Atom(incNewStmt.a.name, incNewStmt.a.terms))), List(None), false)) } def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = { diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala index 90e70b950..1593d3665 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala @@ -13,12 +13,7 @@ import scala.util.Try // case class Variable(varName : String, relName : String, index : Int ) // TODO make Atom a trait, and have multiple case classes, e.g., RelationAtom and CondExprAtom // ddlog column variable type: constant or variable -sealed trait ColumnVariable -case class Variable(varName : String, relName : String, index : Int ) extends ColumnVariable - -case class Expression(variables: List[ColumnVariable], ops: List[String], relName: String, index: Int) -case class ColumnExpr(expr: Expr, 
relName: String, index: Int) -case class Operator(operator: String, operand: ColumnVariable) +case class Variable(varName : String, relName : String, index : Int ) sealed trait Expr case class VarExpr(name: String) extends Expr @@ -26,8 +21,8 @@ case class ConstExpr(value: String) extends Expr case class FuncExpr(function: String, args: List[Expr], isAggregation: Boolean) extends Expr case class BinaryOpExpr(lhs: Expr, op: String, rhs: Expr) extends Expr -case class Atom(name : String, terms : List[ColumnExpr]) -case class Attribute(name : String, terms : List[Variable], types : List[String]) +case class Atom(name : String, terms : List[Expr]) +case class Attribute(name : String, terms : List[VarExpr], types : List[String]) case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[Option[Cond]], isDistinct: Boolean) case class Column(name : String, t : String) case class BodyWithCondition(body: List[Atom], condition: Option[Cond]) @@ -36,7 +31,7 @@ case class BodyWithCondition(body: List[Atom], condition: Option[Cond]) sealed trait Cond case class ComparisonCond(lhs: Expr, op: String, rhs: Expr) extends Cond case class NegationCond(cond: Cond) extends Cond -case class BinaryOpCond(lhs: Cond, op: LogicOperator.LogicOperator, rhs: Cond) extends Cond +case class CompoundCond(lhs: Cond, op: LogicOperator.LogicOperator, rhs: Cond) extends Cond // logic operators object LogicOperator extends Enumeration { @@ -122,7 +117,7 @@ class DeepDiveLogParser extends JavaTokenParsers { def schemaDeclaration: Parser[SchemaDeclaration] = relationName ~ opt("?") ~ "(" ~ rep1sep(columnDeclaration, ",") ~ ")" ~ opt(dataType) ^^ { case (r ~ isQuery ~ "(" ~ attrs ~ ")" ~ vType) => { - val vars = attrs.zipWithIndex map { case(x, i) => Variable(x.name, r, i) } + val vars = attrs map { case(x) => VarExpr(x.name) } var types = attrs map { case(x) => x.t } val variableType = vType match { case None => if (isQuery != None) Some(BooleanType) else None @@ -132,12 +127,14 @@ class DeepDiveLogParser extends JavaTokenParsers { } } - def operator = "||" | "+" | "-" | "*" | "/" | "&" | "::" + def operator = "||" | "+" | "-" | "*" | "/" | "&" + def typeOperator = "::" val aggregationFunctions = Set("MAX", "SUM", "MIN", "ARRAY_ACCUM", "ARRAY_AGG") // expressions def expr : Parser[Expr] = ( lexpr ~ operator ~ expr ^^ { case (lhs ~ op ~ rhs) => BinaryOpExpr(lhs, op, rhs) } + | lexpr ~ typeOperator ~ constant ^^ { case (lhs ~ op ~ rhs) => BinaryOpExpr(lhs, op, ConstExpr(rhs)) } | lexpr ) @@ -152,22 +149,19 @@ class DeepDiveLogParser extends JavaTokenParsers { // TODO support aggregate function syntax somehow def cqHead = relationName ~ "(" ~ rep1sep(expr, ",") ~ ")" ^^ { - case (r ~ "(" ~ variableUses ~ ")") => - Atom(r, variableUses.zipWithIndex map { - case (e,i) => ColumnExpr(e, r, i) - }) - } + case (r ~ "(" ~ expressions ~ ")") => Atom(r, expressions) + } // conditional expressions def compareOperator = "LIKE" | ">" | "<" | ">=" | "<=" | "!=" | "=" | "IS" | "IS NOT" def cond : Parser[Cond] = ( acond ~ (";") ~ cond ^^ { case (lhs ~ op ~ rhs) => - BinaryOpCond(lhs, LogicOperator.OR, rhs) + CompoundCond(lhs, LogicOperator.OR, rhs) } | acond ) def acond : Parser[Cond] = - ( lcond ~ (",") ~ acond ^^ { case (lhs ~ op ~ rhs) => BinaryOpCond(lhs, LogicOperator.AND, rhs) } + ( lcond ~ (",") ~ acond ^^ { case (lhs ~ op ~ rhs) => CompoundCond(lhs, LogicOperator.AND, rhs) } | lcond ) // ! has higher priority... 
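Aside (editorial illustration, not part of the patch): the layered cond / acond / lcond combinators above give ";" (OR) the loosest binding, "," (AND) the next, and "!" the tightest. A self-contained Scala sketch with simplified stand-ins for this patch's Cond classes, rendering SQL the way compileCond does at this point in the series:

    object CondShapeDemo extends App {
      sealed trait Cond
      case class Cmp(sql: String) extends Cond
      case class Not(c: Cond) extends Cond
      case class And(l: Cond, r: Cond) extends Cond
      case class Or(l: Cond, r: Cond) extends Cond

      def toSql(c: Cond): String = c match {
        case Cmp(s)    => s
        case Not(x)    => s"NOT (${toSql(x)})"
        case And(l, r) => s"(${toSql(l)} AND ${toSql(r)})"
        case Or(l, r)  => s"(${toSql(l)} OR ${toSql(r)})"
      }

      // "[z > 100; ![z < 20], z < 50]" from the expressions test parses to:
      val parsed: Cond = Or(Cmp("z > 100"), And(Not(Cmp("z < 20")), Cmp("z < 50")))
      println(toSql(parsed))
      // prints: (z > 100 OR (NOT (z < 20) AND z < 50))
    }

The same shape appears in the expressions test below, where the compiled WHERE clause ends with (R1.r > 100 OR (NOT (R1.r < 20) AND R1.r < 50)).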
@@ -184,10 +178,7 @@ class DeepDiveLogParser extends JavaTokenParsers { def cqBodyAtom: Parser[Atom] = relationName ~ "(" ~ repsep(expr, ",") ~ ")" ^^ { - case (r ~ "(" ~ variableBindings ~ ")") => - Atom(r, variableBindings.zipWithIndex map { - case (e,i) => ColumnExpr(e, r, i) - }) + case (r ~ "(" ~ patterns ~ ")") => Atom(r, patterns) } def cqBody: Parser[List[Atom]] = rep1sep(cqBodyAtom, ",") diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index a3c41ca23..b61619026 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -16,8 +16,8 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { } def print(stmt: SchemaDeclaration): String = { - val columnDecls = stmt.a.terms map { - case Variable(name, _, i) => s"${name} ${stmt.a.types(i)}" + val columnDecls = stmt.a.terms.zipWithIndex map { + case (VarExpr(name),i) => s"${name} ${stmt.a.types(i)}" } val prefix = s"${stmt.a.name}${if (stmt.isQuery) "?" else ""}(" val indentation = " " * prefix.length @@ -69,10 +69,10 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { cond match { case ComparisonCond(lhs, op, rhs) => s"${printExpr(lhs)} ${op} ${printExpr(rhs)}" case NegationCond(c) => s"![${printCond(c)}]" - case BinaryOpCond(lhs, op, rhs) => { + case CompoundCond(lhs, op, rhs) => { op match { - case LogicOperator.AND => s"[${printCond(lhs)}], [${printCond(rhs)}]" - case LogicOperator.OR => s"[${printCond(lhs)}]; [${printCond(rhs)}]" + case LogicOperator.AND => s"[${printCond(lhs)}, ${printCond(rhs)}]" + case LogicOperator.OR => s"[${printCond(lhs)}; ${printCond(rhs)}]" } } } @@ -80,9 +80,7 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { def print(cq: ConjunctiveQuery): String = { val printAtom = {a:Atom => - val vars = a.terms map { - case e => printExpr(e.expr) - } + val vars = a.terms map printExpr s"${a.name}(${vars.mkString(", ")})" } val printListAtom = {a:List[Atom] => diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogSemanticChecker.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogSemanticChecker.scala index 7468979dc..ca2f7fc36 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogSemanticChecker.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogSemanticChecker.scala @@ -76,9 +76,9 @@ object DeepDiveLogSemanticChecker extends DeepDiveLogHandler { stmt match { case SchemaDeclaration(Attribute(r, terms, types), isQuery, vType) => { if (isQuery) { - terms.foreach { x => - if (reservedSet contains x.varName) - error(stmt, s"""variable relation contains reserved column "${x.varName}" """) + terms.foreach { case VarExpr(name) => + if (reservedSet contains name) + error(stmt, s"""variable relation contains reserved column "${name}" """) } } } diff --git a/test/compile-error-test.bats.template b/test/compile-error-test.bats.template index 1a8d59d76..2488fa915 100644 --- a/test/compile-error-test.bats.template +++ b/test/compile-error-test.bats.template @@ -1,7 +1,7 @@ #!/usr/bin/env bats # Compile error test # -# The test case here feeds a malformed .ddl into ddlog's print command and compares whether it produces an expected error. +# The test case here feeds a malformed .ddl into ddlog's check command and compares whether it produces an expected error. source "$BATS_TEST_DIRNAME"/../bats-template.bash # for $TESTDIR, $it, etc. 
@@ -15,7 +15,7 @@ setup() { # check if input produces a compile error @test "$it compiles input" { - ! ddlog compile "$TESTDIR"/input.ddl >/dev/null 2>"$actualError" + ! ddlog check "$TESTDIR"/input.ddl >/dev/null 2>"$actualError" diff "$expectedError" "$actualError" } diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected index 3ce87caa8..b24266301 100644 --- a/test/expected-output-test/expressions/compile.expected +++ b/test/expected-output-test/expressions/compile.expected @@ -72,7 +72,7 @@ CREATE VIEW Q AS SELECT DISTINCT 'test' :: TEXT AS column_0, 123 AS column_1, R0.k AS "a.R0.k" , unnest(R1.q) AS column_3 FROM a R0, b R1, c R2 - WHERE R1.k = R0.k AND R2.s = R1.p || R1.q AND R2.n = 10 AND R2.t = 'foo' AND ((R1.r > 100) OR ((NOT (R1.r < 20)) AND (R1.r < 50))) + WHERE R1.k = R0.k AND R2.s = R1.p || R1.q AND R2.n = 10 AND R2.t = 'foo' AND (R1.r > 100 OR (NOT (R1.r < 20) AND R1.r < 50)) """ style: "sql_extractor" diff --git a/test/expected-output-test/expressions/print.expected b/test/expected-output-test/expressions/print.expected index 97d9a9859..c858e7f0a 100644 --- a/test/expected-output-test/expressions/print.expected +++ b/test/expected-output-test/expressions/print.expected @@ -12,7 +12,7 @@ c(s text, Q( "test" :: TEXT, 123, id, unnest(y)) * :- a(id), b(id, x, y, z), - c(x || y, 10, "foo" ), [z > 100]; [[![z < 20]], [z < 50]]. + c(x || y, 10, "foo" ), [z > 100; [![z < 20], z < 50]]. P(y, z, MAX(w)) :- a(x), From ecb4d6fbeefcfaf2c67a2d51760322f08933953f Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Fri, 10 Jul 2015 16:52:31 -0700 Subject: [PATCH 165/347] Add parentheses to expressions, add more tests --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 2 +- .../ddlog/DeepDiveLogPrettyPrinter.scala | 2 +- .../expressions/compile.expected | 150 ++++++++++++++++-- .../expressions/input.ddl | 28 +++- .../expressions/print.expected | 37 ++++- 5 files changed, 205 insertions(+), 14 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 374145b86..7e6e7ebc0 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -204,7 +204,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C case BinaryOpExpr(lhs, op, rhs) => { val resovledLhs = recurse(lhs, OriginalOnly, depth + 1) val resovledRhs = recurse(rhs, OriginalOnly, depth + 1) - s"${resovledLhs} ${op} ${resovledRhs}${columnAlias}" + s"(${resovledLhs} ${op} ${resovledRhs})${columnAlias}" } } } diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index b61619026..582c9d82a 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -60,7 +60,7 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { val resolvedArgs = args map (x => printExpr(x)) s"${function}(${resolvedArgs.mkString(", ")})" } - case BinaryOpExpr(lhs, op, rhs) => s"${printExpr(lhs)} ${op} ${printExpr(rhs)}" + case BinaryOpExpr(lhs, op, rhs) => s"(${printExpr(lhs)} ${op} ${printExpr(rhs)})" } } diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected index b24266301..88958c3b8 100644 ---
a/test/expected-output-test/expressions/compile.expected +++ b/test/expected-output-test/expressions/compile.expected @@ -55,24 +55,156 @@ style: "sql_extractor" } + deepdive.extraction.extractors.extraction_rule_9 { + sql: """ DROP VIEW IF EXISTS E; + CREATE VIEW E AS + SELECT (R0.k :: INT) AS column_0 + FROM b R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_14 { + sql: """ DROP VIEW IF EXISTS J; + CREATE VIEW J AS + SELECT R0.k AS "b.R0.k" + FROM b R0 + WHERE ((R0.k + R0.r) = 100 OR NOT (R0.k > 50)) + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_10 { + sql: """ DROP VIEW IF EXISTS F; + CREATE VIEW F AS + SELECT ((R0.k :: INT) + (R0.r :: INT)) AS column_0 + FROM b R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_5 { + sql: """ DROP VIEW IF EXISTS A; + CREATE VIEW A AS + SELECT (R0.k + (R0.r * (R0.k + R0.r))) AS column_0 + FROM b R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_13 { + sql: """ DROP VIEW IF EXISTS I; + CREATE VIEW I AS + SELECT R0.k AS "b.R0.k" + FROM b R0 + WHERE ((R0.k + R0.r) = 100 OR (R0.k > 50 AND R0.r < 10)) + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_11 { + sql: """ DROP VIEW IF EXISTS G; + CREATE VIEW G AS + SELECT R0.k AS "b.R0.k" + FROM b R0 + WHERE (R0.k + R0.r) = 100 + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_3 { + sql: """ DROP VIEW IF EXISTS Q; + CREATE VIEW Q AS + SELECT DISTINCT ('test' :: TEXT) AS column_0, 123 AS column_1, R0.k AS "a.R0.k" , unnest(R1.q) AS column_3 + FROM a R0, b R1, c R2 + WHERE R1.k = R0.k AND R2.s = (R1.p || R1.q) AND R2.n = 10 AND R2.t = 'foo' AND (R1.r > 100 OR (NOT (R1.r < 20) AND R1.r < 50)) + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_6 { + sql: """ DROP VIEW IF EXISTS B; + CREATE VIEW B AS + SELECT func((R0.k + (R0.r + R0.k))) AS column_0 + FROM b R0 + + """ + style: "sql_extractor" + + } + + deepdive.extraction.extractors.extraction_rule_4 { sql: """ DROP VIEW IF EXISTS P; CREATE VIEW P AS SELECT R1.p AS "b.R1.p" , R1.q AS "b.R1.q" , MAX(R1.r) AS column_2 FROM a R0, b R1 - WHERE R1.k = R0.k + WHERE R1.k = R0.k """ style: "sql_extractor" } - deepdive.extraction.extractors.extraction_rule_3 { - sql: """ DROP VIEW IF EXISTS Q; - CREATE VIEW Q AS - SELECT DISTINCT 'test' :: TEXT AS column_0, 123 AS column_1, R0.k AS "a.R0.k" , unnest(R1.q) AS column_3 - FROM a R0, b R1, c R2 - WHERE R1.k = R0.k AND R2.s = R1.p || R1.q AND R2.n = 10 AND R2.t = 'foo' AND (R1.r > 100 OR (NOT (R1.r < 20) AND R1.r < 50)) + deepdive.extraction.extractors.extraction_rule_7 { + sql: """ DROP VIEW IF EXISTS C; + CREATE VIEW C AS + SELECT func(func(func(R0.k))) AS column_0 + FROM b R0 + + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_12 { + sql: """ DROP VIEW IF EXISTS H; + CREATE VIEW H AS + SELECT R0.k AS "b.R0.k" + FROM b R0 + WHERE ((R0.k + R0.r) = 100 AND R0.k > 50) + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_15 { + sql: """ DROP VIEW IF EXISTS K; + CREATE VIEW K AS + SELECT R0.k AS "b.R0.k" + FROM b R0 + WHERE ((R0.k + R0.r) = 100 AND (NOT (R0.k > 50) OR R0.k = 40)) + """ + style: "sql_extractor" + + } + + + deepdive.extraction.extractors.extraction_rule_8 { + sql: """ DROP VIEW IF EXISTS D; + CREATE VIEW D AS + SELECT (func((R0.k * func2((R0.r + 
R0.k)))) + R0.k) AS column_0 + FROM b R0 + """ style: "sql_extractor" @@ -80,6 +212,6 @@ deepdive.pipeline.run: ${PIPELINE} deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2] -deepdive.pipeline.pipelines.extraction: [extraction_rule_4, extraction_rule_3] -deepdive.pipeline.pipelines.endtoend: [extraction_rule_4, extraction_rule_3] +deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_3, extraction_rule_4, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_11, extraction_rule_15, extraction_rule_6] +deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_12, extraction_rule_3, extraction_rule_4, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_11, extraction_rule_15, extraction_rule_6] deepdive.pipeline.pipelines.cleanup: [cleanup] diff --git a/test/expected-output-test/expressions/input.ddl b/test/expected-output-test/expressions/input.ddl index f0ba156c5..51c046b1d 100644 --- a/test/expected-output-test/expressions/input.ddl +++ b/test/expected-output-test/expressions/input.ddl @@ -2,6 +2,32 @@ a(k int). b(k int, p text, q text, r int). c(s text, n int, t text). +# comprehensive Q("test" :: TEXT, 123, id, unnest(y)) * :- a(id), b(id, x,y,z), c(x || y,10,"foo"), [z>100; !z < 20, z < 50]. -P(y, z, MAX(w)) :- a(x), b(x,y,z,w). \ No newline at end of file +# group by +P(y, z, MAX(w)) :- a(x), b(x,y,z,w). + +# expressions +A( x + (w * (x + w)) ) :- b(x,y,z,w). + +B( func(x + w + x) ) :- b(x,y,z,w). + +C( func(func(func(x))) ) :- b(x,y,z,w). + +D( func(x * func2(w + x)) + x ) :- b(x,y,z,w). + +E( x :: INT) :- b(x,y,z,w). + +F( (x :: INT) + (w :: INT) ) :- b(x,y,z,w). + +# conditions +G(x) :- b(x,y,z,w), x + w = 100. + +H(x) :- b(x,y,z,w), [x + w = 100, x > 50]. + +I(x) :- b(x,y,z,w), [x + w = 100; [x > 50, w < 10]]. + +J(x) :- b(x,y,z,w), [x + w = 100; !x > 50]. + +K(x) :- b(x,y,z,w), [x + w = 100, [!x > 50; x = 40]]. \ No newline at end of file diff --git a/test/expected-output-test/expressions/print.expected b/test/expected-output-test/expressions/print.expected index c858e7f0a..2598ef85a 100644 --- a/test/expected-output-test/expressions/print.expected +++ b/test/expected-output-test/expressions/print.expected @@ -9,12 +9,45 @@ c(s text, n int, t text). -Q( "test" :: TEXT, 123, id, unnest(y)) * :- +Q(( "test" :: TEXT), 123, id, unnest(y)) * :- a(id), b(id, x, y, z), - c(x || y, 10, "foo" ), [z > 100; [![z < 20], z < 50]]. + c((x || y), 10, "foo" ), [z > 100; [![z < 20], z < 50]]. P(y, z, MAX(w)) :- a(x), b(x, y, z, w). +A((x + (w * (x + w)))) :- + b(x, y, z, w). + +B(func((x + (w + x)))) :- + b(x, y, z, w). + +C(func(func(func(x)))) :- + b(x, y, z, w). + +D((func((x * func2((w + x)))) + x)) :- + b(x, y, z, w). + +E((x :: INT)) :- + b(x, y, z, w). + +F(((x :: INT) + (w :: INT))) :- + b(x, y, z, w). + +G(x) :- + b(x, y, z, w), (x + w) = 100. + +H(x) :- + b(x, y, z, w), [(x + w) = 100, x > 50]. + +I(x) :- + b(x, y, z, w), [(x + w) = 100; [x > 50, w < 10]]. + +J(x) :- + b(x, y, z, w), [(x + w) = 100; ![x > 50]]. + +K(x) :- + b(x, y, z, w), [(x + w) = 100, [![x > 50]; x = 40]]. + From 11a3c3177cfd757ce160275323a0c662cfd90f60 Mon Sep 17 00:00:00 2001 From: Feiran Wang Date: Fri, 10 Jul 2015 19:00:15 -0700 Subject: [PATCH 166/347] Further refactoring 1. Redo typecast 2. Change type of terms in schema declaration 3. 
Simplify compileExpr and generateSQLBody 4. Correct bracketing for conditions --- .../deepdive/ddlog/DeepDiveLogCompiler.scala | 57 ++++++++++--------- .../ddlog/DeepDiveLogDeltaDeriver.scala | 5 +- .../ddlog/DeepDiveLogMergeDeriver.scala | 5 +- .../deepdive/ddlog/DeepDiveLogParser.scala | 14 ++--- .../ddlog/DeepDiveLogPrettyPrinter.scala | 5 +- .../ddlog/DeepDiveLogSemanticChecker.scala | 2 +- .../expressions/compile.expected | 6 +- .../expressions/input.ddl | 2 +- .../expressions/print.expected | 6 +- 9 files changed, 52 insertions(+), 50 deletions(-) diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala index 7e6e7ebc0..32ea7de9b 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala @@ -115,7 +115,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C statements.foreach { case SchemaDeclaration(Attribute(r, terms, types), isQuery, vType) => { terms.zipWithIndex.foreach { - case (VarExpr(n), i) => + case (n, i) => schema += { (r,i) -> n } ground_relations += { r -> !isQuery } // record whether a query or a ground term. if (isQuery) variableType += { r -> vType.get } @@ -190,25 +190,35 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C // resolve an expression def compileExpr(e: Expr, cq: ConjunctiveQuery, alias: AliasStyle, index : Int, isHead: Boolean) : String = { - def recurse(e: Expr, alias: AliasStyle, depth: Int) : String = { - // for columns without a name (constant, function call, binary operator), add an column index alias if necessary - val columnAlias = if (depth == 0 && isHead) s" AS column_${index}" else "" + // recursively compile an expression + def compileExprInner(e: Expr, alias: AliasStyle) : String = { e match { case VarExpr(name) => resolveColumn(name, cq, alias).get - case ConstExpr(value) => value + columnAlias + case ConstExpr(value) => value case FuncExpr(function, args, agg) => { - val resolvedArgs = args map (x => recurse(x, OriginalOnly, depth + 1)) + // note alias is overridden with OriginalOnly, because when an expression appears in + // the function, we only need its column name, without " AS ..."
aliasing, same below
+    val resolvedArgs = args map (x => compileExprInner(x, OriginalOnly))
     val resolved = s"${function}(${resolvedArgs.mkString(", ")})"
-        resolved + columnAlias
+        resolved
       }
       case BinaryOpExpr(lhs, op, rhs) => {
-        val resolvedLhs = recurse(lhs, OriginalOnly, depth + 1)
-        val resolvedRhs = recurse(rhs, OriginalOnly, depth + 1)
-        s"(${resolvedLhs} ${op} ${resolvedRhs})${columnAlias}"
+        val resolvedLhs = compileExprInner(lhs, OriginalOnly)
+        val resolvedRhs = compileExprInner(rhs, OriginalOnly)
+        s"(${resolvedLhs} ${op} ${resolvedRhs})"
+      }
+      case TypecastExpr(lhs, rhs) => {
+        val resolvedLhs = compileExprInner(lhs, OriginalOnly)
+        s"(${resolvedLhs} :: ${rhs})"
       }
     }
   }
-    recurse(e, alias, 0)
+    // for columns without a name (constant, function call, binary operator), add a column index alias if necessary
+    val columnAlias = e match {
+      case x: VarExpr => ""
+      case _ => if (isHead) s" AS column_${index}" else ""
+    }
+    compileExprInner(e, alias) + columnAlias
   }

   // resolve a condition
@@ -216,7 +226,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
     cond match {
       case ComparisonCond(lhs, op, rhs) =>
         s"${compileExpr(lhs, cq, OriginalOnly, 0, false)} ${op} ${compileExpr(rhs, cq, OriginalOnly, 0, false)}"
-      case NegationCond(c) => s"NOT (${compileCond(c, cq)})"
+      case NegationCond(c) => s"(NOT ${compileCond(c, cq)})"
       case CompoundCond(lhs, op, rhs) => {
         val resolvedLhs = s"${compileCond(lhs, cq)}"
         val resolvedRhs = s"${compileCond(rhs, cq)}"
@@ -261,10 +271,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
     }

     // resolve conditions
-    val conditionStr = z.conditions(0) match {
-      case Some(c) => compileCond(c, z)
-      case None => ""
-    }
+    val conditionStr = z.conditions(0) map (compileCond(_, z))

     // handle group by
     // map head terms, leaving out aggregation functions
@@ -281,14 +288,11 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
       s"\n        GROUP BY ${groupbyTerms.mkString(", ")}"
     }

-    val whereClauseStrBody = List(whereClause.mkString(" AND "), conditionStr).filter(_ != "").mkString(" AND ")
-    val whereClauseStr = (whereClauseStrBody match {
-      case "" => ""
-      case _ => "WHERE " + whereClauseStrBody
-    }) + groupbyStr
+    val whereClauseConds = whereClause ++ conditionStr
+    val whereClauseStr = if (whereClauseConds isEmpty) "" else whereClauseConds.mkString("WHERE ", " AND ", "")

     s"""FROM ${ bodyNames }
-        ${ whereClauseStr }"""
+        ${ whereClauseStr }${groupbyStr}"""
   }

   // Group statements by head
@@ -319,7 +323,7 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
   // Analyze the block visibility among statements
   def analyzeVisible(statements: List[Statement]) = {
-    extractionRuleGroupByHead.values foreach { value => visible += value(0) }
+    extractionRuleGroupByHead.values foreach { visible += _(0) }
     functionCallList foreach { visible += _ }
   }

@@ -392,7 +396,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
     if ((stmt.a.name startsWith "dd_new_") && (ss.inferenceRuleGroupByHead contains stmt.a.name)) {
     } else {
       var columnDecls = stmt.a.terms.zipWithIndex map {
-        case (VarExpr(name),i) => s"${name} ${stmt.a.types(i)}"
+        case (name,i) => s"${name} ${stmt.a.types(i)}"
       }
       if (stmt.isQuery) {
         val labelColumn = stmt.variableType match {
@@ -639,10 +643,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
   def compileVariableKey(ss: CompilationState): CompiledBlocks = {
     var keys = new ListBuffer[String]()
     for (stmt <- (ss.schemaDeclarationGroupByHead map (_._2)).flatten) {
-      var columnNames = stmt.a.terms map {
-        case VarExpr(name) => name
-      }
-      if (stmt.isQuery) keys += s"""${stmt.a.name} : [${columnNames.mkString(", ")}]"""
+      if (stmt.isQuery) keys += s"""${stmt.a.name} : [${stmt.a.terms.mkString(", ")}]"""
     }
     ss.mode match {
       case INCREMENTAL => List(s"""
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala
index ad96f33d3..0207f2e00 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala
@@ -98,8 +98,9 @@ object DeepDiveLogDeltaDeriver{
     // if (!stmt.isQuery) {
     incrementalStatement += ExtractionRule(ConjunctiveQuery(
-      Atom(incNewStmt.a.name, incNewStmt.a.terms),
-      List(List(Atom(stmt.a.name, stmt.a.terms)), List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms))),
+      Atom(incNewStmt.a.name, incNewStmt.a.terms map { VarExpr(_) } ),
+      List(List(Atom(stmt.a.name, stmt.a.terms map { VarExpr(_) })),
+        List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms map { VarExpr(_) }))),
       List(None, None), false))
     // }
     incrementalStatement.toList
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala
index 3a8567688..77a942459 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala
@@ -17,8 +17,9 @@ object DeepDiveLogMergeDeriver{
       )
     )

-    ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, stmt.a.terms),
-      List(List(Atom(incNewStmt.a.name, incNewStmt.a.terms))), List(None), false))
+    ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, stmt.a.terms map { VarExpr(_) }),
+      List(List(Atom(incNewStmt.a.name, incNewStmt.a.terms map { VarExpr(_) }))),
+      List(None), false))
   }

   def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = {
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
index 1593d3665..8cc7b2e77 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
@@ -10,9 +10,6 @@ import scala.util.Try
 // ***************************************
 // * The union types for the parser.     *
 // ***************************************
-// case class Variable(varName : String, relName : String, index : Int )
-// TODO make Atom a trait, and have multiple case classes, e.g., RelationAtom and CondExprAtom
-// ddlog column variable type: constant or variable
 case class Variable(varName : String, relName : String, index : Int )

 sealed trait Expr
@@ -20,9 +17,10 @@ case class VarExpr(name: String) extends Expr
 case class ConstExpr(value: String) extends Expr
 case class FuncExpr(function: String, args: List[Expr], isAggregation: Boolean) extends Expr
 case class BinaryOpExpr(lhs: Expr, op: String, rhs: Expr) extends Expr
+case class TypecastExpr(lhs: Expr, rhs: String) extends Expr

 case class Atom(name : String, terms : List[Expr])
-case class Attribute(name : String, terms : List[VarExpr], types : List[String])
+case class Attribute(name : String, terms : List[String], types : List[String])
 case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[Option[Cond]], isDistinct: Boolean)
 case class Column(name : String, t : String)
 case class BodyWithCondition(body: List[Atom], condition: Option[Cond])
@@ -87,7 +85,7 @@ class DeepDiveLogParser extends JavaTokenParsers {
   def stringLiteralAsSqlString = stringLiteral ^^ {
     s => s"""'${s.stripPrefix("\"").stripSuffix("\"")}'"""
   }
-  def constant = stringLiteralAsSqlString | wholeNumber | "TRUE" | "FALSE" | "NULL" | "TEXT" | "INT" | "BOOLEAN"
+  def constant = stringLiteralAsSqlString | wholeNumber | "TRUE" | "FALSE" | "NULL"

   // Single-line comments beginning with # or // are supported by treating them as whiteSpace
   // C/Java/Scala style multi-line comments cannot be easily supported with RegexParsers unless we introduce a dedicated lexer.
@@ -117,8 +115,8 @@ class DeepDiveLogParser extends JavaTokenParsers {
   def schemaDeclaration: Parser[SchemaDeclaration] =
     relationName ~ opt("?") ~ "(" ~ rep1sep(columnDeclaration, ",") ~ ")" ~ opt(dataType) ^^ {
       case (r ~ isQuery ~ "(" ~ attrs ~ ")" ~ vType) => {
-        val vars = attrs map { case(x) => VarExpr(x.name) }
-        var types = attrs map { case(x) => x.t }
+        val vars = attrs map (_.name)
+        var types = attrs map (_.t)
         val variableType = vType match {
           case None => if (isQuery != None) Some(BooleanType) else None
           case Some(s) => Some(s)
@@ -134,7 +132,7 @@ class DeepDiveLogParser extends JavaTokenParsers {
   // expressions
   def expr : Parser[Expr] =
     ( lexpr ~ operator ~ expr ^^ { case (lhs ~ op ~ rhs) => BinaryOpExpr(lhs, op, rhs) }
-    | lexpr ~ typeOperator ~ constant ^^ { case (lhs ~ op ~ rhs) => BinaryOpExpr(lhs, op, ConstExpr(rhs)) }
+    | lexpr ~ typeOperator ~ columnType ^^ { case (lhs ~ _ ~ rhs) => TypecastExpr(lhs, rhs) }
     | lexpr
     )

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
index 582c9d82a..0d33cc5c1 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
@@ -17,7 +17,7 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler {

   def print(stmt: SchemaDeclaration): String = {
     val columnDecls = stmt.a.terms.zipWithIndex map {
-      case (VarExpr(name),i) => s"${name} ${stmt.a.types(i)}"
+      case (name,i) => s"${name} ${stmt.a.types(i)}"
     }
     val prefix = s"${stmt.a.name}${if (stmt.isQuery) "?" else ""}("
     val indentation = " " * prefix.length
@@ -61,6 +61,7 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler {
         s"${function}(${resolvedArgs.mkString(", ")})"
       }
       case BinaryOpExpr(lhs, op, rhs) => s"(${printExpr(lhs)} ${op} ${printExpr(rhs)})"
+      case TypecastExpr(lhs, rhs) => s"(${printExpr(lhs)} :: ${rhs})"
     }
   }

@@ -68,7 +69,7 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler {
   def printCond(cond: Cond) : String = {
     cond match {
       case ComparisonCond(lhs, op, rhs) => s"${printExpr(lhs)} ${op} ${printExpr(rhs)}"
-      case NegationCond(c) => s"![${printCond(c)}]"
+      case NegationCond(c) => s"[!${printCond(c)}]"
       case CompoundCond(lhs, op, rhs) => {
         op match {
           case LogicOperator.AND => s"[${printCond(lhs)}, ${printCond(rhs)}]"
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogSemanticChecker.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogSemanticChecker.scala
index ca2f7fc36..13fc3ed87 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogSemanticChecker.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogSemanticChecker.scala
@@ -76,7 +76,7 @@ object DeepDiveLogSemanticChecker extends DeepDiveLogHandler {
     stmt match {
       case SchemaDeclaration(Attribute(r, terms, types), isQuery, vType) => {
         if (isQuery) {
-          terms.foreach { case VarExpr(name) =>
+          terms.foreach { case name =>
             if (reservedSet contains name)
               error(stmt, s"""variable relation contains reserved column "${name}" """)
           }
diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected
index 88958c3b8..b2f3f249e 100644
--- a/test/expected-output-test/expressions/compile.expected
+++ b/test/expected-output-test/expressions/compile.expected
@@ -72,7 +72,7 @@
     CREATE VIEW J AS
     SELECT R0.k AS "b.R0.k"
     FROM b R0
-    WHERE ((R0.k + R0.r) = 100 OR NOT (R0.k > 50))
+    WHERE ((R0.k + R0.r) = 100 OR (NOT R0.k > 50))
     """
   style: "sql_extractor"

@@ -132,7 +132,7 @@
     CREATE VIEW Q AS
     SELECT DISTINCT ('test' :: TEXT) AS column_0, 123 AS column_1, R0.k AS "a.R0.k" , unnest(R1.q) AS column_3
     FROM a R0, b R1, c R2
-    WHERE R1.k = R0.k AND R2.s = (R1.p || R1.q) AND R2.n = 10 AND R2.t = 'foo' AND (R1.r > 100 OR (NOT (R1.r < 20) AND R1.r < 50))
+    WHERE R1.k = R0.k AND R2.s = (R1.p || R1.q) AND R2.n = 10 AND R2.t = 'foo' AND (R1.r > 100 OR ((NOT R1.r < 20) AND R1.r < 50))
     """
   style: "sql_extractor"

@@ -192,7 +192,7 @@
     CREATE VIEW K AS
     SELECT R0.k AS "b.R0.k"
    FROM b R0
-    WHERE ((R0.k + R0.r) = 100 AND (NOT (R0.k > 50) OR R0.k = 40))
+    WHERE ((R0.k + R0.r) = 100 AND ((NOT R0.k > 50) OR R0.k = 40))
     """
   style: "sql_extractor"

diff --git a/test/expected-output-test/expressions/input.ddl b/test/expected-output-test/expressions/input.ddl
index 51c046b1d..092889b4a 100644
--- a/test/expected-output-test/expressions/input.ddl
+++ b/test/expected-output-test/expressions/input.ddl
@@ -24,7 +24,7 @@ F( (x :: INT) + (w :: INT) ) :- b(x,y,z,w).

 # conditions
 G(x) :- b(x,y,z,w), x + w = 100.
-H(x) :- b(x,y,z,w), [x + w = 100, x > 50].
+H(x) :- b(x,y,z,w), x + w = 100, x > 50.

 I(x) :- b(x,y,z,w), [x + w = 100; [x > 50, w < 10]].

diff --git a/test/expected-output-test/expressions/print.expected b/test/expected-output-test/expressions/print.expected
index 2598ef85a..10bb024d5 100644
--- a/test/expected-output-test/expressions/print.expected
+++ b/test/expected-output-test/expressions/print.expected
@@ -12,7 +12,7 @@ c(s text,
 Q(( "test" :: TEXT), 123, id, unnest(y)) * :-
   a(id),
   b(id, x, y, z),
-  c((x || y), 10, "foo" ), [z > 100; [![z < 20], z < 50]].
+  c((x || y), 10, "foo" ), [z > 100; [[!z < 20], z < 50]].

 P(y, z, MAX(w)) :-
   a(x),
@@ -46,8 +46,8 @@ I(x) :-
   b(x, y, z, w), [(x + w) = 100; [x > 50, w < 10]].

 J(x) :-
-  b(x, y, z, w), [(x + w) = 100; ![x > 50]].
+  b(x, y, z, w), [(x + w) = 100; [!x > 50]].

 K(x) :-
-  b(x, y, z, w), [(x + w) = 100, [![x > 50]; x = 40]].
+  b(x, y, z, w), [(x + w) = 100, [[!x > 50]; x = 40]].

From 34339f094fb4eecca2e78fcce2173b2da12491b4 Mon Sep 17 00:00:00 2001
From: Feiran Wang
Date: Sun, 12 Jul 2015 03:32:32 -0700
Subject: [PATCH 167/347] Add support for limit

---
 .../org/deepdive/ddlog/DeepDiveLogCompiler.scala  | 11 ++++++++---
 .../deepdive/ddlog/DeepDiveLogDeltaDeriver.scala  |  4 ++--
 .../deepdive/ddlog/DeepDiveLogMergeDeriver.scala  |  2 +-
 .../org/deepdive/ddlog/DeepDiveLogParser.scala    | 10 ++++++----
 .../ddlog/DeepDiveLogPrettyPrinter.scala          |  8 +++++++-
 .../expressions/compile.expected                  | 16 ++++++++++++++--
 test/expected-output-test/expressions/input.ddl   |  5 ++++-
 .../expressions/print.expected                    |  3 +++
 8 files changed, 45 insertions(+), 14 deletions(-)

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index 32ea7de9b..7c4c5df09 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -291,8 +291,13 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
     val whereClauseConds = whereClause ++ conditionStr
     val whereClauseStr = if (whereClauseConds isEmpty) "" else whereClauseConds.mkString("WHERE ", " AND ", "")

+    val limitStr = z.limit match {
+      case Some(s) => s" LIMIT ${s}"
+      case None => ""
+    }
+
     s"""FROM ${ bodyNames }
-        ${ whereClauseStr }${groupbyStr}"""
+        ${ whereClauseStr }${groupbyStr}${limitStr}"""
   }

   // Group statements by head
@@ -439,7 +444,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
     var inputQueries = new ListBuffer[String]()
     for (stmt <- stmts) {
       for (cqBody <- stmt.q.bodies) {
-        val tmpCq = ConjunctiveQuery(stmt.q.head, List(cqBody), stmt.q.conditions, stmt.q.isDistinct)
+        val tmpCq = stmt.q.copy(bodies = List(cqBody))
         // Generate the body of the query.
         val qs = new QuerySchema( tmpCq )
@@ -556,7 +561,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
       for (cqBody <- stmt.q.bodies) {
         // edge query
         val fakeBody = stmt.q.head +: cqBody
-        val fakeCQ = ConjunctiveQuery(stmt.q.head, List(fakeBody), stmt.q.conditions, stmt.q.isDistinct) // we will just use the fakeBody below.
+        val fakeCQ = stmt.q.copy(bodies = List(fakeBody))
         val index = cqBody.length + 1
         val qs2 = new QuerySchema( fakeCQ )
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala
index 0207f2e00..71b647c92 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala
@@ -70,7 +70,7 @@ object DeepDiveLogDeltaDeriver{
       }
     }
     // TODO fix conditions
-    ConjunctiveQuery(incCqHead, incCqBodies.toList, incCqConditions.toList, cq.isDistinct)
+    ConjunctiveQuery(incCqHead, incCqBodies.toList, incCqConditions.toList, cq.isDistinct, cq.limit)
   }

   // Incremental schema declaration,
@@ -101,7 +101,7 @@ object DeepDiveLogDeltaDeriver{
         Atom(incNewStmt.a.name, incNewStmt.a.terms map { VarExpr(_) } ),
         List(List(Atom(stmt.a.name, stmt.a.terms map { VarExpr(_) })),
           List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms map { VarExpr(_) }))),
-        List(None, None), false))
+        List(None, None), false, None))
     // }
     incrementalStatement.toList
   }
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala
index 77a942459..f9eea977a 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala
@@ -19,7 +19,7 @@ object DeepDiveLogMergeDeriver{

     ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, stmt.a.terms map { VarExpr(_) }),
       List(List(Atom(incNewStmt.a.name, incNewStmt.a.terms map { VarExpr(_) }))),
-      List(None), false))
+      List(None), false, None))
   }

   def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = {
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
index 8cc7b2e77..2d4cd1786 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
@@ -21,7 +21,8 @@ case class TypecastExpr(lhs: Expr, rhs: String) extends Expr

 case class Atom(name : String, terms : List[Expr])
 case class Attribute(name : String, terms : List[String], types : List[String])
-case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[Option[Cond]], isDistinct: Boolean)
+case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[Option[Cond]],
+  isDistinct: Boolean, limit: Option[Int])
 case class Column(name : String, t : String)
 case class BodyWithCondition(body: List[Atom], condition: Option[Cond])
@@ -185,9 +186,10 @@ class DeepDiveLogParser extends JavaTokenParsers {
   }

   def conjunctiveQuery : Parser[ConjunctiveQuery] =
-    cqHead ~ opt("*") ~ ":-" ~ rep1sep(cqBodyWithCondition, ";") ^^ {
-      case (headatom ~ isDistinct ~ ":-" ~ disjunctiveBodies) =>
-        ConjunctiveQuery(headatom, disjunctiveBodies.map(_.body), disjunctiveBodies.map(_.condition), isDistinct != None)
+    cqHead ~ opt("*") ~ opt("|" ~> decimalNumber) ~ ":-" ~ rep1sep(cqBodyWithCondition, ";") ^^ {
+      case (headatom ~ isDistinct ~ limit ~ ":-" ~ disjunctiveBodies) =>
+        ConjunctiveQuery(headatom, disjunctiveBodies.map(_.body), disjunctiveBodies.map(_.condition),
+          isDistinct != None, limit map (_.toInt))
     }

   def relationType: Parser[RelationType] =
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
index 0d33cc5c1..dd19c0ebe 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
@@ -100,7 +100,13 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler {
       }
     }).mkString(";\n    ")

-    s"""${printAtom(cq.head)} ${if (cq.isDistinct) "*" else ""} :-
+    val distinctStr = if (cq.isDistinct) "*" else ""
+    val limitStr = cq.limit match {
+      case Some(s) => s" | ${s}"
+      case None => ""
+    }
+
+    s"""${printAtom(cq.head)} ${distinctStr}${limitStr} :-
        |  ${bodyWithCondition}""".stripMargin
   }

diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected
index b2f3f249e..556d415b8 100644
--- a/test/expected-output-test/expressions/compile.expected
+++ b/test/expected-output-test/expressions/compile.expected
@@ -139,6 +139,18 @@
   }


+  deepdive.extraction.extractors.extraction_rule_16 {
+    sql: """ DROP VIEW IF EXISTS L;
+    CREATE VIEW L AS
+    SELECT DISTINCT R0.k AS "b.R0.k"
+    FROM b R0
+     LIMIT 100
+    """
+    style: "sql_extractor"
+
+  }
+
+
   deepdive.extraction.extractors.extraction_rule_6 {
     sql: """ DROP VIEW IF EXISTS B;
     CREATE VIEW B AS
@@ -212,6 +224,6 @@
 deepdive.pipeline.run: ${PIPELINE}
 deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2]
-deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_3, extraction_rule_4, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_11, extraction_rule_15, extraction_rule_6]
-deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_12, extraction_rule_3, extraction_rule_4, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_11, extraction_rule_15, extraction_rule_6]
+deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_16, extraction_rule_3, extraction_rule_4, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_11, extraction_rule_15, extraction_rule_6]
+deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_12, extraction_rule_16, extraction_rule_3, extraction_rule_4, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_11, extraction_rule_15, extraction_rule_6]
 deepdive.pipeline.pipelines.cleanup: [cleanup]
diff --git a/test/expected-output-test/expressions/input.ddl b/test/expected-output-test/expressions/input.ddl
index 092889b4a..7e1bd8597 100644
--- a/test/expected-output-test/expressions/input.ddl
+++ b/test/expected-output-test/expressions/input.ddl
@@ -30,4 +30,7 @@ I(x) :- b(x,y,z,w), [x + w = 100; [x > 50, w < 10]].

 J(x) :- b(x,y,z,w), [x + w = 100; !x > 50].

-K(x) :- b(x,y,z,w), [x + w = 100, [!x > 50; x = 40]].
\ No newline at end of file
+K(x) :- b(x,y,z,w), [x + w = 100, [!x > 50; x = 40]].
+
+# limit
+L(x) * | 100 :- b(x, y, z, w).
\ No newline at end of file
diff --git a/test/expected-output-test/expressions/print.expected b/test/expected-output-test/expressions/print.expected
index 10bb024d5..7a29b3a82 100644
--- a/test/expected-output-test/expressions/print.expected
+++ b/test/expected-output-test/expressions/print.expected
@@ -51,3 +51,6 @@ J(x) :-
 K(x) :-
   b(x, y, z, w), [(x + w) = 100, [[!x > 50]; x = 40]].

+L(x) * | 100 :-
+  b(x, y, z, w).
+
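For reference, the new `| limit` head syntax this patch parses can be exercised in isolation. The following is a minimal sketch, assuming only scala-parser-combinators on the classpath; `ToyLimitParser` and its `head` rule are hypothetical stand-ins, but the combinator shape (`opt("*")` for DISTINCT, `opt("|" ~> decimalNumber)` for LIMIT) mirrors the `conjunctiveQuery` change above:

    import scala.util.parsing.combinator.JavaTokenParsers

    // Head shape from the patch: Name(args) [*] [| N] :-
    object ToyLimitParser extends JavaTokenParsers {
      def head: Parser[(Boolean, Option[Int])] =
        ident ~ "(" ~ repsep(ident, ",") ~ ")" ~ opt("*") ~ opt("|" ~> decimalNumber) <~ ":-" ^^ {
          case _ ~ _ ~ _ ~ _ ~ distinct ~ limit => (distinct.isDefined, limit map (_.toInt))
        }

      def main(args: Array[String]): Unit = {
        println(parseAll(head, "L(x) * | 100 :-")) // (true, Some(100)) -> emits "... LIMIT 100"
        println(parseAll(head, "Q(x) :-"))         // (false, None)     -> no LIMIT clause
      }
    }

Because `limit` is carried as an `Option[Int]` on `ConjunctiveQuery`, the compiler's `limitStr` match can simply render nothing for rules that do not use the feature.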

From e183e70f95ddce46502ecb2fc889a666a3e67129 Mon Sep 17 00:00:00 2001
From: Feiran Wang
Date: Sun, 12 Jul 2015 17:36:13 -0700
Subject: [PATCH 168/347] Add support for plpy extractor

---
 .../org/deepdive/ddlog/DeepDiveLogCompiler.scala     |  2 +-
 .../scala/org/deepdive/ddlog/DeepDiveLogParser.scala | 12 ++++++++----
 .../deepdive/ddlog/DeepDiveLogPrettyPrinter.scala    |  8 +++++---
 3 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index 7c4c5df09..d3cc3b256 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -529,7 +529,7 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
     val udfDetails = (function.implementations collectFirst {
       case impl: RowWiseLineHandler =>
         s"""udf: $${APP_HOME}\"/${StringEscapeUtils.escapeJava(impl.command)}\"
-        style: \"${impl.format}_extractor\" """
+        style: \"${impl.style}_extractor\" """
     })

     if (udfDetails.isEmpty)
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
index 2d4cd1786..b04fb5b35 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
@@ -63,7 +63,7 @@ case class RelationTypeDeclaration(names: List[String], types: List[String]) ext
 case class RelationTypeAlias(likeRelationName: String) extends RelationType

 trait FunctionImplementationDeclaration
-case class RowWiseLineHandler(format: String, command: String) extends FunctionImplementationDeclaration
+case class RowWiseLineHandler(style: String, command: String) extends FunctionImplementationDeclaration

 // Statements that will be parsed and compiled
 trait Statement
@@ -203,9 +203,13 @@ class DeepDiveLogParser extends JavaTokenParsers {
   def inferenceMode = "mode" ~> "=" ~> inferenceModeType

   def functionImplementation : Parser[FunctionImplementationDeclaration] =
-    "implementation" ~ stringLiteralAsString ~ "handles" ~ ("tsv" | "json") ~ "lines" ^^ {
-      case (_ ~ command ~ _ ~ format ~ _) => RowWiseLineHandler(command=command, format=format)
-    }
+    ( "implementation" ~ stringLiteralAsString ~ "handles" ~ ("tsv" | "json") ~ "lines" ^^ {
+        case (_ ~ command ~ _ ~ style ~ _) => RowWiseLineHandler(command=command, style=style)
+      }
+    | "implementation" ~ stringLiteralAsString ~ "runs" ~ "as" ~ "plpy" ^^ {
+        case (_ ~ command ~ _ ~ _ ~ style) => RowWiseLineHandler(command=command, style=style)
+      }
+    )

   def functionDeclaration : Parser[FunctionDeclaration] =
     ( "function" ~ functionName ~ "over" ~ relationType
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
index dd19c0ebe..a0b3f8f65 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
@@ -36,9 +36,11 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler {
     val inputType = print(stmt.inputType)
     val outputType = print(stmt.outputType)
     val impls = stmt.implementations map {
-      case impl: RowWiseLineHandler =>
-        "\"" + StringEscapeUtils.escapeJava(impl.command) + "\"" +
-        s"\n        handles ${impl.format} lines"
+      case impl: RowWiseLineHandler => {
+        val styleStr = if (impl.style == "plpy") s"\n        runs as plpy"
+          else s"\n        handles ${impl.style} lines"
+        "\"" + StringEscapeUtils.escapeJava(impl.command) + "\"" + styleStr
+      }
     }
     val modeStr = if (stmt.mode == null) "" else s" mode = ${stmt.mode}"
     s"""function ${stmt.functionName}

From 0830c30718800dc88f8253a268aa27044c88bbf1 Mon Sep 17 00:00:00 2001
From: Feiran Wang
Date: Sun, 12 Jul 2015 18:27:06 -0700
Subject: [PATCH 169/347] Add support for specifying factor functions. Change
 semantics specification

---
 examples/spouse_example.ddl                        |  3 +--
 .../deepdive/ddlog/DeepDiveLogCompiler.scala       | 10 ++++---
 .../ddlog/DeepDiveLogDeltaDeriver.scala            |  2 +-
 .../deepdive/ddlog/DeepDiveLogParser.scala         | 11 ++++----
 .../ddlog/DeepDiveLogPrettyPrinter.scala           |  6 +++--
 .../ocr_example/print-incremental.expected         | 24 ++++++++---------
 .../ocr_example/print.expected                     | 14 +++++-----
 test/expected-output-test/semantics/input.ddl      |  2 +-
 .../smoke_example/print-incremental.expected       | 26 +++++++++----------
 .../smoke_example/print.expected                   | 14 +++++-----
 .../spouse_example/print-incremental.expected      |  3 +--
 .../spouse_example/print.expected                  |  3 +--
 .../spouse_example_new_feature/input.ddl           |  2 +-
 .../print-incremental.expected                     |  2 +-
 .../print-incremental.expected                     |  6 ++---
 .../print.expected                                 |  6 ++---
 16 files changed, 63 insertions(+), 71 deletions(-)

diff --git a/examples/spouse_example.ddl b/examples/spouse_example.ddl
index 040545fce..1aaa9703f 100644
--- a/examples/spouse_example.ddl
+++ b/examples/spouse_example.ddl
@@ -73,5 +73,4 @@ has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l)
 label = l.
 has_spouse(rid) :- has_spouse_candidates(a, b, c, d, rid, l),
   has_spouse_features(rid, f)
-weight = f
-semantics = Imply.
+weight = f.
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index d3cc3b256..4cac69c5d 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -593,9 +593,13 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
         else ""
       }).filter(_ != "")
       val firstFunc = funcBody(0)
-      val function = ss.variableType get stmt.q.head.name match {
-        case Some(BooleanType) => stmt.semantics
-        case Some(MultinomialType(_)) => "Multinomial"
+      // if function is not specified, use Imply for boolean, and Multinomial for multinomial variables
+      val function = stmt.function match {
+        case Some(f) => f
+        case None => ss.variableType get stmt.q.head.name match {
+          case Some(BooleanType) => "Imply"
+          case Some(MultinomialType(_)) => "Multinomial"
+        }
       }
       func = s"""${function}(${(funcBody.tail :+ firstFunc).mkString(", ")})"""
     }
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala
index 71b647c92..e2176fb46 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala
@@ -143,7 +143,7 @@ object DeepDiveLogDeltaDeriver{
   // Incremental inference rule,
   // create delta rules based on original extraction rule
   def transform(stmt: InferenceRule): List[Statement] = {
-    List(InferenceRule(transform(stmt.q, true, stmt.mode), stmt.weights, stmt.semantics))
+    List(stmt.copy(q = transform(stmt.q, true, stmt.mode), mode = null))
   }

   def generateIncrementalFunctionInputList(program: DeepDiveLog.Program) {
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
index b04fb5b35..6a39de807 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
@@ -71,7 +71,7 @@ case class SchemaDeclaration( a : Attribute , isQuery : Boolean, variableType :
 case class FunctionDeclaration( functionName: String, inputType: RelationType, outputType: RelationType, implementations: List[FunctionImplementationDeclaration], mode: String = null) extends Statement
 case class ExtractionRule(q : ConjunctiveQuery, supervision: String = null) extends Statement // Extraction rule
 case class FunctionCallRule(input : String, output : String, function : String) extends Statement // Extraction rule
-case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, semantics : String = "Imply", mode: String = null) extends Statement // Weighted rule
+case class InferenceRule(q : ConjunctiveQuery, weights : FactorWeight, function : Option[String], mode: String = null) extends Statement // Weighted rule

 // Parser
 class DeepDiveLogParser extends JavaTokenParsers {
@@ -242,13 +242,14 @@ class DeepDiveLogParser extends JavaTokenParsers {

   def supervision = "label" ~> "=" ~> variableName

-  def semantics = "semantics" ~> "=" ~> semanticType
+  def factorFunctionName = "Imply" | "And" | "Equal" | "Or" | "Multinomial" | "Linear" | "Ratio"
+  def factorFunction = "function" ~> "=" ~> factorFunctionName

   def inferenceRule : Parser[InferenceRule] =
-    ( conjunctiveQuery ~ factorWeight ~ opt(semantics) ~ opt(inferenceMode)
+    ( conjunctiveQuery ~ factorWeight ~ opt(factorFunction) ~ opt(inferenceMode)
     ) ^^ {
-      case (q ~ weight ~ semantics ~ mode) =>
-        InferenceRule(q, weight, semantics.getOrElse("Imply"), mode.getOrElse(null))
+      case (q ~ weight ~ function ~ mode) =>
+        InferenceRule(q, weight, function, mode.getOrElse(null))
     }

   // rules or schema elements in arbitrary order
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
index a0b3f8f65..a291fc8ab 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
@@ -132,8 +132,10 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler {
       case UnknownFactorWeight(vs) => vs.mkString(", ")
     })
     ) +
-    ( if (stmt.semantics == null) ""
-      else "\n  semantics = " + stmt.semantics
+    ( stmt.function match {
+      case Some(f) => s"\n  function = ${f}"
+      case None => ""
+      }
     ) + ".\n"
   }

diff --git a/test/expected-output-test/ocr_example/print-incremental.expected b/test/expected-output-test/ocr_example/print-incremental.expected
index e8ed0bbf8..fc4e69079 100644
--- a/test/expected-output-test/ocr_example/print-incremental.expected
+++ b/test/expected-output-test/ocr_example/print-incremental.expected
@@ -13,7 +13,7 @@ dd_new_features(id BIGSERIAL,
                 feature_id INT,
                 feature_val BOOLEAN).

-dd_new_features(id, word_id, feature_id, feature_val) :- 
+dd_new_features(id, word_id, feature_id, feature_val) :-
   features(id, word_id, feature_id, feature_val);
   dd_delta_features(id, word_id, feature_id, feature_val).

@@ -26,7 +26,7 @@ dd_delta_label1(wid INT,
 dd_new_label1(wid INT,
               val BOOLEAN).

-dd_new_label1(wid, val) :- 
+dd_new_label1(wid, val) :-
   label1(wid, val);
   dd_delta_label1(wid, val).

@@ -39,7 +39,7 @@ dd_delta_label2(wid INT,
 dd_new_label2(wid INT,
               val BOOLEAN).

-dd_new_label2(wid, val) :- 
+dd_new_label2(wid, val) :-
   label2(wid, val);
   dd_delta_label2(wid, val).

@@ -49,7 +49,7 @@ dd_delta_q1?(wid INT).

 dd_new_q1?(wid INT).

-dd_new_q1(wid) :- 
+dd_new_q1(wid) :-
   q1(wid);
   dd_delta_q1(wid).

@@ -59,25 +59,23 @@ dd_delta_q2?(wid INT).

 dd_new_q2?(wid INT).

-dd_new_q2(wid) :- 
+dd_new_q2(wid) :-
   q2(wid);
   dd_delta_q2(wid).

-dd_delta_q1(wid) :- 
+dd_delta_q1(wid) :-
   dd_delta_label1(wid, val)
   label = val.

-dd_delta_q2(wid) :- 
+dd_delta_q2(wid) :-
   dd_delta_label2(wid, val)
   label = val.

-dd_new_q1(wid) :- 
+dd_new_q1(wid) :-
   dd_delta_features(id, wid, fid, fval)
-  weight = fid
-  semantics = Imply.
+  weight = fid.

-dd_new_q2(wid) :- 
+dd_new_q2(wid) :-
   dd_delta_features(id, wid, fid, fval)
-  weight = fid
-  semantics = Imply.
+  weight = fid.

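The compiler hunk earlier in this patch is why the expected outputs in this test drop `semantics = Imply`: an explicit `function = ...` clause wins, and otherwise the head relation's variable type picks the default. A standalone sketch of that rule (types simplified, `pick` is a hypothetical helper, not code from the patch):

    sealed trait VariableType
    case object BooleanType extends VariableType
    case class MultinomialType(numCategories: Int) extends VariableType

    object FactorFunctionDefault {
      // Mirrors the match in DeepDiveLogCompiler: explicit function wins,
      // otherwise the head relation's variable type decides.
      def pick(explicit: Option[String], headType: VariableType): String =
        explicit getOrElse (headType match {
          case BooleanType        => "Imply"
          case MultinomialType(_) => "Multinomial"
        })

      def main(args: Array[String]): Unit = {
        println(pick(None, BooleanType))            // Imply
        println(pick(None, MultinomialType(3)))     // Multinomial
        println(pick(Some("Linear"), BooleanType))  // Linear
      }
    }
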
diff --git a/test/expected-output-test/ocr_example/print.expected b/test/expected-output-test/ocr_example/print.expected
index da66f1d67..d3ff92171 100644
--- a/test/expected-output-test/ocr_example/print.expected
+++ b/test/expected-output-test/ocr_example/print.expected
@@ -13,21 +13,19 @@ q1?(wid INT).

 q2?(wid INT).

-q1(wid) :- 
+q1(wid) :-
   label1(wid, val)
   label = val.

-q2(wid) :- 
+q2(wid) :-
   label2(wid, val)
   label = val.

-q1(wid) :- 
+q1(wid) :-
   features(id, wid, fid, fval)
-  weight = fid
-  semantics = Imply.
+  weight = fid.

-q2(wid) :- 
+q2(wid) :-
   features(id, wid, fid, fval)
-  weight = fid
-  semantics = Imply.
+  weight = fid.

diff --git a/test/expected-output-test/semantics/input.ddl b/test/expected-output-test/semantics/input.ddl
index f18e77c06..6f3a9f36c 100644
--- a/test/expected-output-test/semantics/input.ddl
+++ b/test/expected-output-test/semantics/input.ddl
@@ -5,4 +5,4 @@ Q?(x int).

 Q(x) :- R(x, y, z); R(x, y, z), S(y, z, w); S(y, x, w), T(x, z, w)
 weight = y
-semantics = imply.
+function = Linear.
diff --git a/test/expected-output-test/smoke_example/print-incremental.expected b/test/expected-output-test/smoke_example/print-incremental.expected
index 3ed640bfc..3a17e8fba 100644
--- a/test/expected-output-test/smoke_example/print-incremental.expected
+++ b/test/expected-output-test/smoke_example/print-incremental.expected
@@ -7,7 +7,7 @@ dd_delta_person(person_id bigint,
 dd_new_person(person_id bigint,
               name text).

-dd_new_person(person_id, name) :- 
+dd_new_person(person_id, name) :-
   person(person_id, name);
   dd_delta_person(person_id, name).

@@ -20,7 +20,7 @@ dd_delta_person_has_cancer(person_id bigint,
 dd_new_person_has_cancer(person_id bigint,
                          has_cancer boolean).

-dd_new_person_has_cancer(person_id, has_cancer) :- 
+dd_new_person_has_cancer(person_id, has_cancer) :-
   person_has_cancer(person_id, has_cancer);
   dd_delta_person_has_cancer(person_id, has_cancer).

@@ -33,7 +33,7 @@ dd_delta_person_smokes(person_id bigint,
 dd_new_person_smokes(person_id bigint,
                      smokes boolean).

-dd_new_person_smokes(person_id, smokes) :- 
+dd_new_person_smokes(person_id, smokes) :-
   person_smokes(person_id, smokes);
   dd_delta_person_smokes(person_id, smokes).

@@ -46,7 +46,7 @@ dd_delta_friends(person_id bigint,
 dd_new_friends(person_id bigint,
                friend_id bigint).

-dd_new_friends(person_id, friend_id) :- 
+dd_new_friends(person_id, friend_id) :-
   friends(person_id, friend_id);
   dd_delta_friends(person_id, friend_id).

@@ -56,7 +56,7 @@ dd_delta_smoke?(person_id bigint).

 dd_new_smoke?(person_id bigint).

-dd_new_smoke(person_id) :- 
+dd_new_smoke(person_id) :-
   smoke(person_id);
   dd_delta_smoke(person_id).

@@ -66,31 +66,29 @@ dd_delta_cancer?(person_id bigint).

 dd_new_cancer?(person_id bigint).

-dd_new_cancer(person_id) :- 
+dd_new_cancer(person_id) :-
   cancer(person_id);
   dd_delta_cancer(person_id).

-dd_delta_smoke(pid) :- 
+dd_delta_smoke(pid) :-
   dd_delta_person_smokes(pid, l)
   label = l.

-dd_delta_cancer(pid) :- 
+dd_delta_cancer(pid) :-
   dd_delta_person_has_cancer(pid, l)
   label = l.

-dd_new_cancer(pid) :- 
+dd_new_cancer(pid) :-
   dd_delta_smoke(pid),
   person_smokes(pid, l);
   dd_new_smoke(pid),
   dd_delta_person_smokes(pid, l)
-  weight = 0.5
-  semantics = Imply.
+  weight = 0.5.

-dd_new_smoke(pid) :- 
+dd_new_smoke(pid) :-
   dd_delta_smoke(pid1),
   friends(pid1, pid);
   dd_new_smoke(pid1),
   dd_delta_friends(pid1, pid)
-  weight = 0.4
-  semantics = Imply.
+  weight = 0.4.

diff --git a/test/expected-output-test/smoke_example/print.expected b/test/expected-output-test/smoke_example/print.expected
index eccbb0b6f..f9e81e54d 100644
--- a/test/expected-output-test/smoke_example/print.expected
+++ b/test/expected-output-test/smoke_example/print.expected
@@ -14,23 +14,21 @@ smoke?(person_id bigint).

 cancer?(person_id bigint).

-smoke(pid) :- 
+smoke(pid) :-
   person_smokes(pid, l)
   label = l.

-cancer(pid) :- 
+cancer(pid) :-
   person_has_cancer(pid, l)
   label = l.

-cancer(pid) :- 
+cancer(pid) :-
   smoke(pid),
   person_smokes(pid, l)
-  weight = 0.5
-  semantics = Imply.
+  weight = 0.5.

-smoke(pid) :- 
+smoke(pid) :-
   smoke(pid1),
   friends(pid1, pid)
-  weight = 0.4
-  semantics = Imply.
+  weight = 0.4.

diff --git a/test/expected-output-test/spouse_example/print-incremental.expected b/test/expected-output-test/spouse_example/print-incremental.expected
index 08a87f0a6..90523ea59 100644
--- a/test/expected-output-test/spouse_example/print-incremental.expected
+++ b/test/expected-output-test/spouse_example/print-incremental.expected
@@ -175,6 +175,5 @@ dd_new_has_spouse(rid) :-
   has_spouse_features(rid, f);
   dd_new_has_spouse_candidates(a, b, c, d, rid, l),
   dd_delta_has_spouse_features(rid, f)
-  weight = f
-  semantics = Imply.
+  weight = f.

diff --git a/test/expected-output-test/spouse_example/print.expected b/test/expected-output-test/spouse_example/print.expected
index db505c038..5e07df218 100644
--- a/test/expected-output-test/spouse_example/print.expected
+++ b/test/expected-output-test/spouse_example/print.expected
@@ -73,6 +73,5 @@ has_spouse(rid) :-
 has_spouse(rid) :-
   has_spouse_candidates(a, b, c, d, rid, l),
   has_spouse_features(rid, f)
-  weight = f
-  semantics = Imply.
+  weight = f.

diff --git a/test/expected-output-test/spouse_example_new_feature/input.ddl b/test/expected-output-test/spouse_example_new_feature/input.ddl
index 4e5711eea..0b1563441 100644
--- a/test/expected-output-test/spouse_example_new_feature/input.ddl
+++ b/test/expected-output-test/spouse_example_new_feature/input.ddl
@@ -75,4 +75,4 @@ has_spouse(rid) :-
   has_spouse_candidates(a, b, c, d, rid, l),
   has_spouse_features(rid, f)
 weight = f
-semantics = Linear.
+function = Linear.
diff --git a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected
index 8ebd7d939..7575d31f3 100644
--- a/test/expected-output-test/spouse_example_new_feature/print-incremental.expected
+++ b/test/expected-output-test/spouse_example_new_feature/print-incremental.expected
@@ -177,5 +177,5 @@ dd_new_has_spouse(rid) :-
   dd_new_has_spouse_candidates(a, b, c, d, rid, l),
   dd_delta_has_spouse_features(rid, f)
   weight = f
-  semantics = Linear.
+  function = Linear.

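The grammar side of the same change: `semantics = ...` is gone, and `function =` accepts only the seven capitalized factor-function names from the patch's `factorFunctionName` rule. A self-contained sketch of just that clause (the enclosing object is a hypothetical harness; the two parser rules are lifted verbatim from the diff above):

    import scala.util.parsing.combinator.JavaTokenParsers

    object ToyFunctionClauseParser extends JavaTokenParsers {
      def factorFunctionName = "Imply" | "And" | "Equal" | "Or" | "Multinomial" | "Linear" | "Ratio"
      def factorFunction = "function" ~> "=" ~> factorFunctionName

      def main(args: Array[String]): Unit = {
        println(parseAll(factorFunction, "function = Linear")) // parsed: Linear
        println(parseAll(factorFunction, "function = imply"))  // failure: names are case-sensitive
      }
    }

The case-sensitivity is visible in the semantics test above, where the old lowercase `semantics = imply` had to become `function = Linear`.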
diff --git a/test/expected-output-test/spouse_example_new_inference/print-incremental.expected b/test/expected-output-test/spouse_example_new_inference/print-incremental.expected
index 3f1ab2e4a..71b4f119d 100644
--- a/test/expected-output-test/spouse_example_new_inference/print-incremental.expected
+++ b/test/expected-output-test/spouse_example_new_inference/print-incremental.expected
@@ -175,13 +175,11 @@ dd_new_has_spouse(rid) :-
   has_spouse_features(rid, f);
   dd_new_has_spouse_candidates(a, b, c, d, rid, l),
   dd_delta_has_spouse_features(rid, f)
-  weight = f
-  semantics = Imply.
+  weight = f.

 dd_new_has_spouse(rid) :-
   dd_new_has_spouse(rid2),
   dd_new_has_spouse_candidates(a1, b1, c1, d1, rid, l1),
   dd_new_has_spouse_candidates(b1, a1, c2, d2, rid2, l2)
-  weight = 3.0
-  semantics = Imply.
+  weight = 3.0.

diff --git a/test/expected-output-test/spouse_example_new_inference/print.expected b/test/expected-output-test/spouse_example_new_inference/print.expected
index 3aefbc215..742839292 100644
--- a/test/expected-output-test/spouse_example_new_inference/print.expected
+++ b/test/expected-output-test/spouse_example_new_inference/print.expected
@@ -73,13 +73,11 @@ has_spouse(rid) :-
 has_spouse(rid) :-
   has_spouse_candidates(a, b, c, d, rid, l),
   has_spouse_features(rid, f)
-  weight = f
-  semantics = Imply.
+  weight = f.

 has_spouse(rid) :-
   has_spouse(rid2),
   has_spouse_candidates(a1, b1, c1, d1, rid, l1),
   has_spouse_candidates(b1, a1, c2, d2, rid2, l2)
-  weight = 3.0
-  semantics = Imply.
+  weight = 3.0.

From 6927e14e986a38c76745a01ffaf389c6f62c9cfd Mon Sep 17 00:00:00 2001
From: Feiran Wang
Date: Mon, 13 Jul 2015 00:54:52 -0700
Subject: [PATCH 170/347] Add support for IN, EXISTS, ANY, ALL

---
 .../deepdive/ddlog/DeepDiveLogCompiler.scala       |  7 +++++
 .../deepdive/ddlog/DeepDiveLogParser.scala         | 13 +++++++--
 .../ddlog/DeepDiveLogPrettyPrinter.scala           |  3 ++
 .../expressions/compile.expected                   | 28 +++++++++++++++++--
 .../expressions/input.ddl                          |  6 +++-
 .../expressions/print.expected                     |  6 ++++
 6 files changed, 58 insertions(+), 5 deletions(-)

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index 4cac69c5d..d75ccffc1 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -235,6 +235,13 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
           case LogicOperator.OR => s"(${resolvedLhs} OR ${resolvedRhs})"
         }
       }
+      case InCond(lhs, rhs) => {
+        s"${compileExpr(lhs, cq, OriginalOnly, 0, false)} IN (SELECT * FROM ${rhs})"
+      }
+      case ExistCond(rhs) => s"EXISTS (SELECT * FROM ${rhs})"
+      case QuantifiedCond(lhs, op, quan, rhs) => {
+        s"${compileExpr(lhs, cq, OriginalOnly, 0, false)} ${op} ${quan} (SELECT * FROM ${rhs})"
+      }
     }
   }

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
index 6a39de807..4f97e8268 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
@@ -31,6 +31,9 @@ sealed trait Cond
 case class ComparisonCond(lhs: Expr, op: String, rhs: Expr) extends Cond
 case class NegationCond(cond: Cond) extends Cond
 case class CompoundCond(lhs: Cond, op: LogicOperator.LogicOperator, rhs: Cond) extends Cond
+case class InCond(lhs: Expr, relName: String) extends Cond
+case class ExistCond(relName: String) extends Cond
+case class QuantifiedCond(lhs: Expr, op: String, quantifier: String, relName: String) extends Cond

 // logic operators
 object LogicOperator extends Enumeration {
@@ -153,6 +156,9 @@ class DeepDiveLogParser extends JavaTokenParsers {

   // conditional expressions
   def compareOperator = "LIKE" | ">" | "<" | ">=" | "<=" | "!=" | "=" | "IS" | "IS NOT"
+  def inOperator = "IN"
+  def quantifierOperator = "ANY" | "ALL"
+
   def cond : Parser[Cond] =
     ( acond ~ (";") ~ cond ^^ { case (lhs ~ op ~ rhs) =>
       CompoundCond(lhs, LogicOperator.OR, rhs)
@@ -169,9 +175,12 @@ class DeepDiveLogParser extends JavaTokenParsers {
     | bcond
     )
   def bcond : Parser[Cond] =
-    ( expr ~ compareOperator ~ expr ^^ { case (lhs ~ op ~ rhs) =>
-      ComparisonCond(lhs, op, rhs)
+    ( expr ~ compareOperator ~ quantifierOperator ~ relationName ^^ { case (lhs ~ op ~ quan ~ rhs) =>
+      QuantifiedCond(lhs, op, quan, rhs)
     }
+    | expr ~ compareOperator ~ expr ^^ { case (lhs ~ op ~ rhs) => ComparisonCond(lhs, op, rhs) }
+    | expr ~ inOperator ~ relationName ^^ { case (lhs ~ _ ~ rhs) => InCond(lhs, rhs) }
+    | "EXISTS" ~> relationName ^^ { ExistCond(_) }
    | "[" ~> cond <~ "]"
    )

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
index a291fc8ab..ce7d79a65 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
@@ -78,6 +78,9 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler {
           case LogicOperator.OR => s"[${printCond(lhs)}; ${printCond(rhs)}]"
         }
       }
+      case InCond(lhs, rhs) => s"${printExpr(lhs)} IN ${rhs}"
+      case ExistCond(rhs) => s"EXISTS ${rhs}"
+      case QuantifiedCond(lhs, op, quan, rhs) => s"${printExpr(lhs)} ${op} ${quan} ${rhs}"
     }
   }

diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected
index 556d415b8..427f95c91 100644
--- a/test/expected-output-test/expressions/compile.expected
+++ b/test/expected-output-test/expressions/compile.expected
@@ -67,6 +67,18 @@
   }


+  deepdive.extraction.extractors.extraction_rule_18 {
+    sql: """ DROP VIEW IF EXISTS N;
+    CREATE VIEW N AS
+    SELECT R0.k AS "a.R0.k"
+    FROM a R0
+    WHERE R0.k > ANY (SELECT * FROM G)
+    """
+    style: "sql_extractor"
+
+  }
+
+
   deepdive.extraction.extractors.extraction_rule_14 {
     sql: """ DROP VIEW IF EXISTS J;
     CREATE VIEW J AS
@@ -103,6 +115,18 @@
   }


+  deepdive.extraction.extractors.extraction_rule_17 {
+    sql: """ DROP VIEW IF EXISTS M;
+    CREATE VIEW M AS
+    SELECT R0.k AS "a.R0.k"
+    FROM a R0
+    WHERE R0.k IN (SELECT * FROM G)
+    """
+    style: "sql_extractor"
+
+  }
+
+
   deepdive.extraction.extractors.extraction_rule_13 {
     sql: """ DROP VIEW IF EXISTS I;
     CREATE VIEW I AS
@@ -224,6 +248,6 @@
 deepdive.pipeline.run: ${PIPELINE}
 deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2]
-deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_16, extraction_rule_3, extraction_rule_4, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_11, extraction_rule_15, extraction_rule_6]
-deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_12, extraction_rule_16, extraction_rule_3, extraction_rule_4, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_11, extraction_rule_15, extraction_rule_6]
+deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_16, extraction_rule_3, extraction_rule_4, extraction_rule_18, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_17, extraction_rule_11, extraction_rule_15, extraction_rule_6]
+deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_12, extraction_rule_16, extraction_rule_3, extraction_rule_4, extraction_rule_18, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_17, extraction_rule_11, extraction_rule_15, extraction_rule_6]
 deepdive.pipeline.pipelines.cleanup: [cleanup]
diff --git a/test/expected-output-test/expressions/input.ddl b/test/expected-output-test/expressions/input.ddl
index 7e1bd8597..74070c3a8 100644
--- a/test/expected-output-test/expressions/input.ddl
+++ b/test/expected-output-test/expressions/input.ddl
@@ -33,4 +33,8 @@ J(x) :- b(x,y,z,w), [x + w = 100; !x > 50].
 K(x) :- b(x,y,z,w), [x + w = 100, [!x > 50; x = 40]].

 # limit
-L(x) * | 100 :- b(x, y, z, w).
\ No newline at end of file
+L(x) * | 100 :- b(x, y, z, w).
+
+M(x) :- a(x), x IN G.
+
+N(x) :- a(x), x > ANY G.
\ No newline at end of file
diff --git a/test/expected-output-test/expressions/print.expected b/test/expected-output-test/expressions/print.expected
index 7a29b3a82..a1a72da16 100644
--- a/test/expected-output-test/expressions/print.expected
+++ b/test/expected-output-test/expressions/print.expected
@@ -54,3 +54,9 @@ K(x) :-
 L(x) * | 100 :-
   b(x, y, z, w).

+M(x) :-
+  a(x), x IN G.
+
+N(x) :-
+  a(x), x > ANY G.
+

From 36b87db405f0e5e30c9f2e1443b6f6d67217ae1e Mon Sep 17 00:00:00 2001
From: Feiran Wang
Date: Mon, 13 Jul 2015 02:17:43 -0700
Subject: [PATCH 171/347] Add support for left outer join

---
 .../deepdive/ddlog/DeepDiveLogCompiler.scala       | 21 ++++++++++++++++---
 .../deepdive/ddlog/DeepDiveLogParser.scala         |  7 ++++++-
 .../ddlog/DeepDiveLogPrettyPrinter.scala           |  1 +
 .../expressions/compile.expected                   | 16 ++++++++++++--
 .../expressions/input.ddl                          |  4 +++-
 .../expressions/print.expected                     |  4 ++++
 6 files changed, 46 insertions(+), 7 deletions(-)

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index d75ccffc1..1d6a15c4a 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -242,13 +242,13 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
       case QuantifiedCond(lhs, op, quan, rhs) => {
         s"${compileExpr(lhs, cq, OriginalOnly, 0, false)} ${op} ${quan} (SELECT * FROM ${rhs})"
       }
+      case _ => ""
     }
   }

   // This is generic code that generates the FROM with positional aliasing R0, R1, etc.
   // and the corresponding WHERE clause (equating all variables)
   def generateSQLBody(z : ConjunctiveQuery) : String = {
-    val bodyNames = ( z.bodies(0).zipWithIndex map { case(x,i) => s"${x.name} R${i}"}).mkString(", ")
     // Simple logic for the where clause, first find every first occurrence of a
     // and stick it in a map.
     val qs = new QuerySchema(z)
@@ -278,7 +278,10 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
     }

     // resolve conditions
-    val conditionStr = z.conditions(0) map (compileCond(_, z))
+    val conditionStr = z.conditions(0).flatMap {
+      case x: OuterJoinCond => None
+      case x: Cond => Some(compileCond(x, z))
+    }

     // handle group by
     // map head terms, leaving out aggregation functions
@@ -303,7 +306,19 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
       case None => ""
     }

-    s"""FROM ${ bodyNames }
+    // compile outer join
+    val bodyNames = z.bodies(0) map (_.name)
+    val outerJoins = z.conditions(0).collect { case x: OuterJoinCond => x }
+    val outerRelations = outerJoins map (_.relName) toSet
+    val outerJoinStr = (outerJoins map { case OuterJoinCond(name, cond) =>
+      s" LEFT OUTER JOIN ${name} R${bodyNames.indexOf(name)} ON ${compileCond(cond, z)} "
+    }).mkString
+
+    val fromBodyNames = (z.bodies(0).filterNot(outerRelations contains _.name).zipWithIndex map
+      { case(x,i) => s"${x.name} R${i}"}).mkString(", ")
+    val fromClause = fromBodyNames + outerJoinStr
+
+    s"""FROM ${ fromClause }
         ${ whereClauseStr }${groupbyStr}${limitStr}"""
   }

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
index 4f97e8268..07f953ef0 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
@@ -34,6 +34,7 @@ case class CompoundCond(lhs: Cond, op: LogicOperator.LogicOperator, rhs: Cond) e
 case class InCond(lhs: Expr, relName: String) extends Cond
 case class ExistCond(relName: String) extends Cond
 case class QuantifiedCond(lhs: Expr, op: String, quantifier: String, relName: String) extends Cond
+case class OuterJoinCond(relName: String, cond: Cond) extends Cond

 // logic operators
 object LogicOperator extends Enumeration {
@@ -158,9 +159,13 @@ class DeepDiveLogParser extends JavaTokenParsers {
   def compareOperator = "LIKE" | ">" | "<" | ">=" | "<=" | "!=" | "=" | "IS" | "IS NOT"
   def inOperator = "IN"
   def quantifierOperator = "ANY" | "ALL"
+  def outerJoinOperator = "OUTER"

   def cond : Parser[Cond] =
-    ( acond ~ (";") ~ cond ^^ { case (lhs ~ op ~ rhs) =>
+    ( outerJoinOperator ~> "(" ~> relationName ~ ":" ~ cond <~ ")" ^^ { case (relName ~ _ ~ cond) =>
+      OuterJoinCond(relName, cond)
+    }
+    | acond ~ (";") ~ cond ^^ { case (lhs ~ op ~ rhs) =>
       CompoundCond(lhs, LogicOperator.OR, rhs)
     }
     | acond
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
index ce7d79a65..ef68c1616 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
@@ -81,6 +81,7 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler {
       case InCond(lhs, rhs) => s"${printExpr(lhs)} IN ${rhs}"
       case ExistCond(rhs) => s"EXISTS ${rhs}"
       case QuantifiedCond(lhs, op, quan, rhs) => s"${printExpr(lhs)} ${op} ${quan} ${rhs}"
+      case OuterJoinCond(relName, c) => s"OUTER(${relName}: ${printCond(c)})"
     }
   }

diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected
index 427f95c91..e65b2a16b 100644
--- a/test/expected-output-test/expressions/compile.expected
+++ b/test/expected-output-test/expressions/compile.expected
@@ -235,6 +235,18 @@
   }


+  deepdive.extraction.extractors.extraction_rule_19 {
+    sql: """ DROP VIEW IF EXISTS O;
+    CREATE VIEW O AS
+    SELECT R0.k AS "a.R0.k"
+    FROM a R0 LEFT OUTER JOIN b R1 ON R0.k = R1.k
+
+    """
+    style: "sql_extractor"
+
+  }
+
+
   deepdive.extraction.extractors.extraction_rule_8 {
     sql: """ DROP VIEW IF EXISTS D;
     CREATE VIEW D AS
@@ -248,6 +260,6 @@
 deepdive.pipeline.run: ${PIPELINE}
 deepdive.pipeline.pipelines.initdb: [extraction_rule_1, extraction_rule_0, extraction_rule_2]
-deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_16, extraction_rule_3, extraction_rule_4, extraction_rule_18, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_17, extraction_rule_11, extraction_rule_15, extraction_rule_6]
-deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_12, extraction_rule_16, extraction_rule_3, extraction_rule_4, extraction_rule_18, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_17, extraction_rule_11, extraction_rule_15, extraction_rule_6]
+deepdive.pipeline.pipelines.extraction: [extraction_rule_7, extraction_rule_12, extraction_rule_16, extraction_rule_19, extraction_rule_3, extraction_rule_4, extraction_rule_18, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_17, extraction_rule_11, extraction_rule_15, extraction_rule_6]
+deepdive.pipeline.pipelines.endtoend: [extraction_rule_7, extraction_rule_12, extraction_rule_16, extraction_rule_19, extraction_rule_3, extraction_rule_4, extraction_rule_18, extraction_rule_8, extraction_rule_10, extraction_rule_13, extraction_rule_5, extraction_rule_9, extraction_rule_14, extraction_rule_17, extraction_rule_11, extraction_rule_15, extraction_rule_6]
 deepdive.pipeline.pipelines.cleanup: [cleanup]
diff --git a/test/expected-output-test/expressions/input.ddl b/test/expected-output-test/expressions/input.ddl
index 74070c3a8..e883b3f57 100644
--- a/test/expected-output-test/expressions/input.ddl
+++ b/test/expected-output-test/expressions/input.ddl
@@ -37,4 +37,6 @@ L(x) * | 100 :- b(x, y, z, w).

 M(x) :- a(x), x IN G.

-N(x) :- a(x), x > ANY G.
\ No newline at end of file
+N(x) :- a(x), x > ANY G.
+
+O(x) :- a(x), b(q,w,e,r), OUTER(b: x = q).
\ No newline at end of file
diff --git a/test/expected-output-test/expressions/print.expected b/test/expected-output-test/expressions/print.expected
index a1a72da16..52ef4df1c 100644
--- a/test/expected-output-test/expressions/print.expected
+++ b/test/expected-output-test/expressions/print.expected
@@ -60,3 +60,7 @@ M(x) :-
 N(x) :-
   a(x), x > ANY G.

+O(x) :-
+  a(x),
+  b(q, w, e, r), OUTER(b: x = q).
+
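To make the alias bookkeeping in this patch concrete: inner relations are re-numbered from R0 after the OUTER() relations are filtered out, while each joined relation keeps the index it had in the full body list. The following sketch reproduces just that assembly step under those assumptions (`ToyOuterJoin` and its inputs are hypothetical; the real code additionally compiles the ON condition from the parsed `Cond`):

    object ToyOuterJoin {
      case class OuterJoin(relName: String, onSql: String)

      // FROM-clause assembly as in the patch: filtered inner list re-indexed
      // from R0, outer joins indexed against the unfiltered body list.
      def fromClause(bodyNames: List[String], outer: List[OuterJoin]): String = {
        val outerRelations = outer.map(_.relName).toSet
        val inner = bodyNames.filterNot(outerRelations).zipWithIndex
          .map { case (name, i) => s"${name} R${i}" }
          .mkString(", ")
        val joins = outer.map { oj =>
          s" LEFT OUTER JOIN ${oj.relName} R${bodyNames.indexOf(oj.relName)} ON ${oj.onSql}"
        }.mkString
        inner + joins
      }

      def main(args: Array[String]): Unit = {
        // O(x) :- a(x), b(q,w,e,r), OUTER(b: x = q) from the test above:
        println(fromClause(List("a", "b"), List(OuterJoin("b", "R0.k = R1.k"))))
        // prints: a R0 LEFT OUTER JOIN b R1 ON R0.k = R1.k
      }
    }

Note that the two numbering schemes only coincide when the OUTER() relations come after all inner ones, as in the O rule here; the "Fix outer join" patch later in this series reworks how outer-join conditions are carried on ConjunctiveQuery.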

From df5bc238870ba53a6a3e9782ef86e2573eb4496c Mon Sep 17 00:00:00 2001
From: Feiran Wang
Date: Mon, 13 Jul 2015 21:47:20 -0700
Subject: [PATCH 172/347] Add support for binding weight to a constant

---
 .../org/deepdive/ddlog/DeepDiveLogCompiler.scala     |  6 +++---
 .../scala/org/deepdive/ddlog/DeepDiveLogParser.scala | 12 +++++-------
 .../deepdive/ddlog/DeepDiveLogPrettyPrinter.scala    |  1 +
 3 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index 1d6a15c4a..ece92d94b 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -597,8 +597,8 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
       // weight string
       val uwStr = stmt.weights match {
-        case KnownFactorWeight(x) => None
         case UnknownFactorWeight(w) => Some(w.flatMap(s => ss.resolveColumn(s, fakeCQ, OriginalAndAlias)).mkString(", "))
+        case _ => None
       }

       val selectStr = (List(variableIdsStr, uwStr) flatten).mkString(", ")
@@ -631,9 +631,9 @@ object DeepDiveLogCompiler extends DeepDiveLogHandler {
         case KnownFactorWeight(x) => s"${x}"
         case UnknownFactorWeight(w) => {
           val weightVar = w.flatMap(s => ss.resolveColumn(s, fakeCQ, AliasOnly)).mkString(", ")
-          if (weightVar == "") "?"
-          else s"?(${weightVar})"
+          s"?(${weightVar})"
         }
+        case UnknownFactorWeightBindingToConst(w) => "?"
       }
     }
     val blockName = ss.resolveInferenceBlockName(stmt)
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
index 07f953ef0..1159533a7 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
@@ -53,14 +53,11 @@ case class MultinomialType(numCategories: Int) extends VariableType {
   def cardinality = numCategories
 }

-sealed trait FactorWeight {
-  def variables : List[String]
-}
+sealed trait FactorWeight

-case class KnownFactorWeight(value: Double) extends FactorWeight {
-  def variables = Nil
-}
+case class KnownFactorWeight(value: Double) extends FactorWeight
 case class UnknownFactorWeight(variables: List[String]) extends FactorWeight
+case class UnknownFactorWeightBindingToConst(value: String) extends FactorWeight

 trait RelationType
 case class RelationTypeDeclaration(names: List[String], types: List[String]) extends RelationType
@@ -252,7 +249,8 @@ class DeepDiveLogParser extends JavaTokenParsers {

   def constantWeight = floatingPointNumberAsDouble ^^ { KnownFactorWeight(_) }
   def unknownWeight = repsep(variableName, ",") ^^ { UnknownFactorWeight(_) }
-  def factorWeight = "weight" ~> "=" ~> (constantWeight | unknownWeight)
+  def unknownWeightBindingToConst = stringLiteralAsString ^^ { UnknownFactorWeightBindingToConst(_) }
+  def factorWeight = "weight" ~> "=" ~> (constantWeight | unknownWeightBindingToConst | unknownWeight)

   def supervision = "label" ~> "=" ~> variableName

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
index ef68c1616..92227fbc1 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala
@@ -134,6 +134,7 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler {
       else "\n  weight = " + (stmt.weights match {
       case KnownFactorWeight(w) => w.toString
       case UnknownFactorWeight(vs) => vs.mkString(", ")
+      case UnknownFactorWeightBindingToConst(vs) => "\"" + vs + "\""
     })
     ) +
     ( stmt.function match {

From 8d38f48ace149c8a4f27e9815773c7fa22b135ae Mon Sep 17 00:00:00 2001
From: Feiran Wang
Date: Mon, 13 Jul 2015 23:53:23 -0700
Subject: [PATCH 173/347] Fix aggregation in nested expressions

---
 .../deepdive/ddlog/DeepDiveLogCompiler.scala | 21 ++++++++++++++-----
 .../expressions/compile.expected             |  1 +
 2 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index ece92d94b..59aebf410 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -283,15 +283,26 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
       case x: Cond => Some(compileCond(x, z))
     }

-    // handle group by
-    // map head terms, leaving out aggregation functions
-    val groupbyTerms = z.head.terms.zipWithIndex map { case (expr, index) =>
+    // check if an expression contains an aggregation function
+    def containsAggregation(expr: Expr) : Boolean = {
       expr match {
-        case FuncExpr(f, args, agg) => if (agg) None else Some("")
-        case _ => Some(compileExpr(expr, z, OriginalOnly, index, false))
+        case VarExpr(name) => false
+        case ConstExpr(value) => false
+        case FuncExpr(function, args, agg) => if (agg) agg else {
+          args.map(containsAggregation).foldLeft(false)(_ || _)
+        }
+        case BinaryOpExpr(lhs, op, rhs) => containsAggregation(lhs) || containsAggregation(rhs)
+        case TypecastExpr(lhs, rhs) => containsAggregation(lhs)
       }
     }

+    // handle group by
+    // map head terms, leaving out aggregation functions
+    val groupbyTerms = z.head.terms.zipWithIndex flatMap { case (expr, index) =>
+      if (containsAggregation(expr)) None
+      else Some(compileExpr(expr, z, OriginalOnly, index, false))
+    }
+
     val groupbyStr = if (groupbyTerms.size == z.head.terms.size) {
       ""
     } else {
diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected
index e65b2a16b..dbfb402bf 100644
--- a/test/expected-output-test/expressions/compile.expected
+++ b/test/expected-output-test/expressions/compile.expected
@@ -193,6 +193,7 @@
     SELECT R1.p AS "b.R1.p" , R1.q AS "b.R1.q" , MAX(R1.r) AS column_2
     FROM a R0, b R1
     WHERE R1.k = R0.k
+        GROUP BY R1.p, R1.q
     """
   style: "sql_extractor"

From 7cfd485be11ffa6d1b9947c43ff9bb7312f44d2f Mon Sep 17 00:00:00 2001
From: Feiran Wang
Date: Tue, 14 Jul 2015 11:55:52 -0700
Subject: [PATCH 174/347] Fix outer join

---
 .../deepdive/ddlog/DeepDiveLogCompiler.scala       |  5 ++--
 .../ddlog/DeepDiveLogDeltaDeriver.scala            | 10 ++++---
 .../ddlog/DeepDiveLogMergeDeriver.scala            |  2 +-
 .../deepdive/ddlog/DeepDiveLogParser.scala         | 22 +++++++++-------
 .../ddlog/DeepDiveLogPrettyPrinter.scala           | 26 ++++++++++++-------
 .../expressions/compile.expected                   |  2 +-
 .../expressions/input.ddl                          |  2 +-
 .../expressions/print.expected                     |  2 +-
 8 files changed, 42 insertions(+), 29 deletions(-)

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
index 59aebf410..efbb7b180 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogCompiler.scala
@@ -319,9 +319,8 @@ class CompilationState( statements : DeepDiveLog.Program, config : DeepDiveLog.C
     // compile outer join
     val bodyNames = z.bodies(0) map (_.name)
-    val outerJoins = z.conditions(0).collect { case x: OuterJoinCond => x }
-    val outerRelations = outerJoins map (_.relName) toSet
-    val outerJoinStr = (outerJoins map { case OuterJoinCond(name, cond) =>
+    val outerRelations = z.outerJoinConds(0) map (_.relName) toSet
+    val outerJoinStr = (z.outerJoinConds(0) map { case OuterJoinCond(name, cond) =>
       s" LEFT OUTER JOIN ${name} R${bodyNames.indexOf(name)} ON ${compileCond(cond, z)} "
     }).mkString

diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala
index e2176fb46..4da0a9d63 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogDeltaDeriver.scala
@@ -33,6 +33,7 @@ object DeepDiveLogDeltaDeriver{

     var incCqBodies = new ListBuffer[List[Atom]]()
     var incCqConditions = new ListBuffer[Option[Cond]]()
+    var incCqOuterConditions = new ListBuffer[List[OuterJoinCond]]()
     // New incremental bodies
     cq.bodies zip cq.conditions foreach { case (body, cond) =>
       // Delta body
@@ -53,6 +54,7 @@ object DeepDiveLogDeltaDeriver{
       if (mode == "inc") {
         incCqBodies += incNewBody
         incCqConditions += cond
+        incCqOuterConditions += List()
       } else {
         for (i <- index to (body.length - 1)) {
           var newBody = new ListBuffer[Atom]()
@@ -64,13 +66,15 @@ object DeepDiveLogDeltaDeriver{
             else if (j == i)
               newBody += incDeltaBody(j)
           incCqConditions += cond
+          // NOTE we don't support outer join in derivation
+          incCqOuterConditions += List()
           }
           incCqBodies += newBody.toList
         }
       }
     }
-    // TODO fix conditions
-    ConjunctiveQuery(incCqHead, incCqBodies.toList, incCqConditions.toList, cq.isDistinct, cq.limit)
+    cq.copy(head = incCqHead, bodies = incCqBodies.toList, conditions = incCqConditions.toList,
+      outerJoinConds = incCqOuterConditions.toList)
   }

   // Incremental schema declaration,
@@ -101,7 +105,7 @@ object DeepDiveLogDeltaDeriver{
         Atom(incNewStmt.a.name, incNewStmt.a.terms map { VarExpr(_) } ),
         List(List(Atom(stmt.a.name, stmt.a.terms map { VarExpr(_) })),
           List(Atom(incDeltaStmt.a.name, incDeltaStmt.a.terms map { VarExpr(_) }))),
-        List(None, None), false, None))
+        List(None, None), false, None, List(List(), List())))
     // }
     incrementalStatement.toList
   }
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala
index f9eea977a..ee86ae479 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogMergeDeriver.scala
@@ -19,7 +19,7 @@ object DeepDiveLogMergeDeriver{

     ExtractionRule(ConjunctiveQuery(Atom(stmt.a.name, stmt.a.terms map { VarExpr(_) }),
       List(List(Atom(incNewStmt.a.name, incNewStmt.a.terms map { VarExpr(_) }))),
-      List(None), false, None))
+      List(None), false, None, List(List())))
   }

   def derive(program: DeepDiveLog.Program): DeepDiveLog.Program = {
diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
index 1159533a7..e02d3915a 100644
--- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
+++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogParser.scala
@@ -22,9 +22,9 @@ case class TypecastExpr(lhs: Expr, rhs: String) extends Expr

 case class Atom(name : String, terms : List[Expr])
 case class Attribute(name : String, terms : List[String], types : List[String])
 case class ConjunctiveQuery(head: Atom, bodies: List[List[Atom]], conditions: List[Option[Cond]],
-  isDistinct: Boolean, limit: Option[Int])
+  isDistinct: Boolean, limit: Option[Int], outerJoinConds: List[List[OuterJoinCond]])
 case class Column(name : String, t : String)
-case class BodyWithCondition(body: List[Atom], condition: Option[Cond])
+case class BodyWithCondition(body: List[Atom], condition: Option[Cond], outerJoinCond: List[OuterJoinCond])

 // condition
 sealed trait Cond
@@ -34,7 +34,7 @@ case class CompoundCond(lhs: Cond, op: LogicOperator.LogicOperator, rhs: Cond) e
 case class InCond(lhs: Expr, relName: String) extends Cond
 case class ExistCond(relName: String) extends Cond
 case class QuantifiedCond(lhs: Expr, op: String, quantifier: String, relName: String) extends Cond
-case class OuterJoinCond(relName: String, cond: Cond) extends Cond
+case class OuterJoinCond(relName: String, cond: Cond)

 // logic operators
 object LogicOperator extends Enumeration {
@@ -159,10 +159,7 @@ class DeepDiveLogParser extends JavaTokenParsers {
   def outerJoinOperator = "OUTER"

   def cond : Parser[Cond] =
-    ( outerJoinOperator ~> "(" ~> relationName ~ ":" ~ cond <~ ")" ^^ { case (relName ~ _ ~ cond) =>
-      OuterJoinCond(relName, cond)
-    }
-    | acond ~ (";") ~ cond ^^ { case (lhs ~ op ~ rhs) =>
+    ( acond ~ (";") ~ cond ^^ { case (lhs ~ op ~ rhs) =>
       CompoundCond(lhs, LogicOperator.OR, rhs)
     }
     | acond
@@ -186,21 +183,26 @@ class DeepDiveLogParser extends JavaTokenParsers {
     | "[" ~> cond <~ "]"
     )

+  def outerJoinCond =
+    outerJoinOperator ~> "(" ~> relationName ~ ":" ~ cond <~ ")" ^^ { case (relName ~ _ ~ cond) =>
+      OuterJoinCond(relName, cond)
+    }
+
   def cqBodyAtom: Parser[Atom] =
     relationName ~ "(" ~ repsep(expr, ",") ~ ")" ^^ {
       case (r ~ "(" ~ patterns ~ ")") => Atom(r, patterns)
     }

   def cqBody: Parser[List[Atom]] = rep1sep(cqBodyAtom, ",")

-  def cqBodyWithCondition = cqBody ~ ("," ~> cond).? ^^ {
-    case (b ~ c) => BodyWithCondition(b, c)
-  }
+  def cqBodyWithCondition = cqBody ~ opt("," ~> rep1sep(outerJoinCond, ",")) ~ ("," ~> cond).?
^^ { + case (b ~ o ~ c) => BodyWithCondition(b, c, o.getOrElse(List())) } def conjunctiveQuery : Parser[ConjunctiveQuery] = cqHead ~ opt("*") ~ opt("|" ~> decimalNumber) ~ ":-" ~ rep1sep(cqBodyWithCondition, ";") ^^ { case (headatom ~ isDistinct ~ limit ~ ":-" ~ disjunctiveBodies) => ConjunctiveQuery(headatom, disjunctiveBodies.map(_.body), disjunctiveBodies.map(_.condition), - isDistinct != None, limit map (_.toInt)) + isDistinct != None, limit map (_.toInt), disjunctiveBodies.map(_.outerJoinCond)) } def relationType: Parser[RelationType] = diff --git a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala index 92227fbc1..98060f740 100644 --- a/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala +++ b/src/main/scala/org/deepdive/ddlog/DeepDiveLogPrettyPrinter.scala @@ -81,7 +81,6 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { case InCond(lhs, rhs) => s"${printExpr(lhs)} IN ${rhs}" case ExistCond(rhs) => s"EXISTS ${rhs}" case QuantifiedCond(lhs, op, quan, rhs) => s"${printExpr(lhs)} ${op} ${quan} ${rhs}" - case OuterJoinCond(relName, c) => s"OUTER(${relName}: ${printCond(c)})" } } @@ -94,16 +93,25 @@ object DeepDiveLogPrettyPrinter extends DeepDiveLogHandler { s"${(a map printAtom).mkString(",\n ")}" } - val conditionList = cq.conditions map { - case Some(x) => Some(printCond(x)) - case None => None + var conditionList = cq.conditions map { + case Some(x) => printCond(x) + case None => "" } + + def printOuter(cond: OuterJoinCond) = { + s"OUTER(${cond.relName}: ${printCond(cond.cond)})" + } + + def printOuterList(conds: List[OuterJoinCond]) : String = { + s"${(conds map printOuter).mkString(",\n ")}" + } + + val outerList = cq.outerJoinConds map (printOuterList(_)) + + // conditionList = outerList ++ conditionList val bodyList = cq.bodies map printListAtom - val bodyWithCondition = (bodyList zip conditionList map { case(a,b) => - b match { - case Some(c) => s"${a}, ${c}" - case None => a - } + val bodyWithCondition = ((bodyList, outerList, conditionList).zipped.toList map { case(a,b,c) => + List(a,b,c).filter(_ != "").mkString(", ") }).mkString(";\n ") val distinctStr = if (cq.isDistinct) "*" else "" diff --git a/test/expected-output-test/expressions/compile.expected b/test/expected-output-test/expressions/compile.expected index dbfb402bf..d8788e85f 100644 --- a/test/expected-output-test/expressions/compile.expected +++ b/test/expected-output-test/expressions/compile.expected @@ -241,7 +241,7 @@ CREATE VIEW O AS SELECT R0.k AS "a.R0.k" FROM a R0 LEFT OUTER JOIN b R1 ON R0.k = R1.k - + WHERE R0.k = R1.r """ style: "sql_extractor" diff --git a/test/expected-output-test/expressions/input.ddl b/test/expected-output-test/expressions/input.ddl index e883b3f57..fcf87cfea 100644 --- a/test/expected-output-test/expressions/input.ddl +++ b/test/expected-output-test/expressions/input.ddl @@ -39,4 +39,4 @@ M(x) :- a(x), x IN G. N(x) :- a(x), x > ANY G. -O(x) :- a(x), b(q,w,e,r), OUTER(b: x = q). \ No newline at end of file +O(x) :- a(x), b(q,w,e,r), OUTER(b: x = q), x = r. \ No newline at end of file diff --git a/test/expected-output-test/expressions/print.expected b/test/expected-output-test/expressions/print.expected index 52ef4df1c..88df9d43b 100644 --- a/test/expected-output-test/expressions/print.expected +++ b/test/expected-output-test/expressions/print.expected @@ -62,5 +62,5 @@ N(x) :- O(x) :- a(x), - b(q, w, e, r), OUTER(b: x = q). + b(q, w, e, r), OUTER(b: x = q), x = r. 
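A minimal standalone sketch of the grammar rework in PATCH 174 above: OUTER(...) clauses now get their own production (outerJoinCond) instead of hiding inside the general cond parser, so each disjunctive body carries its outer-join conditions separately. Names mirror the patch; the condition grammar is stubbed to raw text, and this is an editor's illustration, not the project's actual code:

    import scala.util.parsing.combinator.JavaTokenParsers

    case class OuterJoinCond(relName: String, cond: String)

    object OuterJoinSketch extends JavaTokenParsers {
      def relationName: Parser[String] = ident
      // stand-in for the full `cond` grammar: grab raw text up to ")"
      def cond: Parser[String] = """[^)]+""".r

      def outerJoinCond: Parser[OuterJoinCond] =
        "OUTER" ~> "(" ~> relationName ~ ":" ~ cond <~ ")" ^^ {
          case relName ~ _ ~ c => OuterJoinCond(relName, c.trim)
        }

      def main(args: Array[String]): Unit = {
        // clause taken from the expected-output test above:
        //   O(x) :- a(x), b(q,w,e,r), OUTER(b: x = q), x = r.
        println(parseAll(outerJoinCond, "OUTER(b: x = q)"))
        // expected to print roughly: [1.16] parsed: OuterJoinCond(b,x = q)
      }
    }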
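Likewise, a self-contained sketch of the containsAggregation check from PATCH 173 above: a head term is excluded from GROUP BY as soon as an aggregation call occurs anywhere inside it, even nested under operators or typecasts, which the previous top-level-only match missed. The Expr hierarchy is trimmed to the constructors the patch touches, the field names are assumptions, and the fold over the arguments is condensed to exists:

    sealed trait Expr
    case class VarExpr(name: String) extends Expr
    case class ConstExpr(value: String) extends Expr
    case class FuncExpr(function: String, args: List[Expr], agg: Boolean) extends Expr
    case class BinaryOpExpr(lhs: Expr, op: String, rhs: Expr) extends Expr
    case class TypecastExpr(lhs: Expr, rhs: String) extends Expr

    object AggregationSketch {
      // true iff an aggregation function application appears anywhere in expr
      def containsAggregation(expr: Expr): Boolean = expr match {
        case VarExpr(_) | ConstExpr(_) => false
        case FuncExpr(_, args, agg)    => agg || (args exists containsAggregation)
        case BinaryOpExpr(lhs, _, rhs) => containsAggregation(lhs) || containsAggregation(rhs)
        case TypecastExpr(lhs, _)      => containsAggregation(lhs)
      }

      def main(args: Array[String]): Unit = {
        // head of Q(p, q, MAX(r) + 1): the aggregation is nested in a binary
        // op, so only p and q survive into the GROUP BY list
        val headTerms = List(
          VarExpr("p"),
          VarExpr("q"),
          BinaryOpExpr(FuncExpr("MAX", List(VarExpr("r")), true), "+", ConstExpr("1")))
        println(headTerms filterNot containsAggregation) // List(VarExpr(p), VarExpr(q))
      }
    }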
From 6ee7d965a1344b0d0d0819daf0de67c01f3f9264 Mon Sep 17 00:00:00 2001
From: Jaeho Shin
Date: Thu, 16 Jul 2015 01:05:07 -0700
Subject: [PATCH 175/347] Embeds SBT

---
 Makefile                   |   6 ++++++
 project/sbt/sbt            |   3 +++
 project/sbt/sbt-launch.jar | Bin 0 -> 1132093 bytes
 3 files changed, 9 insertions(+)
 create mode 100755 project/sbt/sbt
 create mode 100644 project/sbt/sbt-launch.jar

diff --git a/Makefile b/Makefile
index a2aa295f5..91e440c6a 100644
--- a/Makefile
+++ b/Makefile
@@ -17,6 +17,12 @@ clean: scala-clean
 	rm -f $(JAR) $(wildcard test/*/*/*.actual)
 	find test/ -name '*.bats' -type l -exec rm -f {} +
 
+PATH += :$(shell pwd)/project/sbt
+export PATH
+ifeq ($(shell uname),Darwin)
+SHELL := /bin/bash  # Mac requires SHELL to be forced to bash
+endif
+
 include scala.mk  # defines scala-build, scala-test-build, scala-assembly-jar, scala-clean targets
 
diff --git a/project/sbt/sbt b/project/sbt/sbt
new file mode 100755
index 000000000..be43b76e8
--- /dev/null
+++ b/project/sbt/sbt
@@ -0,0 +1,3 @@
+#!/bin/bash
+SBT_OPTS="-Xms512M -Xmx1536M -Xss1M -XX:+CMSClassUnloadingEnabled -XX:MaxPermSize=256M"
+java $SBT_OPTS -jar `dirname $0`/sbt-launch.jar "$@"
diff --git a/project/sbt/sbt-launch.jar b/project/sbt/sbt-launch.jar
new file mode 100644
index 0000000000000000000000000000000000000000..76342c66822d832bd88a4630c50274c24d539252
Binary files /dev/null and b/project/sbt/sbt-launch.jar differ
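With PATCH 175 applied, a plain `make` picks up the bundled launcher because the Makefile appends project/sbt to PATH (with SHELL forced to bash on Mac, per the comment in the hunk). The wrapper should also work standalone — presumably something like `project/sbt/sbt compile` from the repository root, though that invocation is an assumption; the patch itself only wires the launcher into the targets defined by scala.mk.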
zEKCS3l-sRvQ#*A5htQGm9zs14=%OseyS&uUvhbn`A~FEt%6sD^1RNsGNZt|#hgDV=6Ee8G-`65fGi=89!-?9;*Bfrd zKK1JeFnbQt1lr&kKe|tRBC@Wn0IJj_c6qLz^_QdnyalBi=X6IO1`P$ z6?L|L=?c*Sh^uUCk71~>59`^-I!WXG-Ni@wxXS#u6pajWtc-|?!1n1)qYhGV z>rUa3T|5zIKa?6DcHYu30q~&l;z{-LBD({7+%;@;*q5!`yg$s-6IPpXBW)_Kh-X!F zb&|IN@VHf4h0^Z02CI93!0&NX5}B3$iU7>(DqO1Bp(E?YHoG)v(42^p%FUH{=S2Dy zr(OggJ&ABnd}e#TRMt$oWuACLdZA98XzpM_ZiCSJiZ@^clPqS({+&}@DkUWdyNJ>7*!!v{`;XdRx z0vbJ%l^rPXjW#d!3^OSg+Iw0i-~e94^v5`*_p@ZO{phY-+8x@jf&J8V8IPb#-l|40 z{>R_24d2bl(Ir1qmHmh_i2j1kY)kyQeEq$iabrQ<{|Y4wdpTqk8}%EtPtE`dX)tOh z1oL}^F}mR;?Kkpy3YWM*76tAlfV@(tQ5}W$>Ks+#R_q9+@Vw1LH7EKoO}1BCQI(36 zP#sqTOfW~)!UUO4d?+h?z_^QSLE6Y(z$}j(wrworLEk_ zB{`x?m2MhqY%kREr-Ya%s2!Wlioaw3tVypoa?p-HMwP&StFEl@f1G#!^F;aIERg>T zDJS}W0}TE%1FNs8U|3+j+kH9%VE@1x7%5s-h99SzrFX_?)vyhq&xO`rqk05jCvUW@ zh}AaUhu_u4yu$iiaux~|qTpUZANp}Kl?$+ZlgCq|<_ezYPNK8IZ{*N0nz?+Dw# zt^^h)BjLVh05&C1`av;}sW;Y8x`oC_rVk2TcEAqZUDP(bg<%&1JtzD@cHoe_3+<{X zv|8Hf6At&SR6cO+l3PXR4|)Hdk2K3tXH9PUGe?t_(Z) zz;wg3mY*d={PvkqM)W)UBx}yWJhCSc5jckcExl^vqu9)yQSPp253qp zO-DOD@D!hx#hJk8S@Z``vLKFlM$ zCwjYa68OD}e=j#UJ-c2EAvIpY@U56-*Jdu6{Tx3yV3%896b{Z2gX%1~ z0`EIQ9bch#w((Srh<>tONL)ksR0w};WwNPXTmoww-l4guadD$-^4>n=8hUXmO68B&B>Rn;e1kMa(?%7uQe-GyV({9A>-H;$Rp!86e#H(VPLTWzkU$*tTk1~ zxVK{WWOxix_ROZZ-l9#2Kiqy|6f5Dr-(x&1zJ%5g{01MA=7%jldrG$wM70E%-{vBB z-ZB@)6uyF?aQnn$4P&Jp!`@J*$EHfSM6F=M7Mov#S4k8Jeb(pxxGzYX70Em(b%Qvnx;x*un;V~Kv}lh zRqh&8UQw7{T)wZ`P7BL~e1r7p088}APiPQ@7s}(ub>AVifO}aFv$r)+wNdKto4%sW zyckoKZ}b>+5X@iK1%g`hqSSAHhsw3!_a9)UaDG2HVmr|G5^g=fQi^$i(GKgmMP`AU z!0#PGsh3?T8UNvzCCnCezxy!?x%jv0AQJ!c=}=q2wNF}X@krbPT9QHFScI~S1I&1cFapZY%A}oKvyxk9 z2X}YQ?e6XF&CTl*-U{m8)I^#NY=Q?c972XLrD z_(Ni-_eX`h>xZ6$eT9dvBW8Jae|9eNS|kR5sv@gRh-Y-oUB+Up0S!0r*!BtdTlMJ`B)2J%a)5hZgZSiqbc5hc~= zi^6dEHavUeo)V1DukByvsc^s89^BcNcSu3qXx|F(2rtP&80LY}ZpYBJ9E=02M@p|Z(pU4#9Hh4th;Z}NzLYcsW_IqeZC@A?mWROJCG(LW zR6L&)go156n4d@}tPfl;{uYk$5Y$C|xDP*ix0H?eF=8y*u4K6&^^GrKVFcM99AMr> zAmqworv{FJJXt)mX-ruwvsym95egPp7X7D4n}(O1Aa~r3;cX0<9QKFs-ex2}(@PAH zzg&>iB|1n!#fYt=Z(9w1I}zxg`@ zOEh+E;r&4vlpXj)0DTaMssr0X(u2rc%0pn4a($@9E|p@(Epx!I4a{X&1?F>11#_`3 zk5ckE)q%O{6Lmbi7@}h@IX?u9&iOluN+dmCqvIWMP*NPZV*Z#q;%HNKWE8D~pr|@h zB<@*<9X!7af?zf0LEQ3vA(udSk^@llWht~7Jju>X0e$YV3BQ*v^p_2g6@a*p4DdB( zkBWZ+F{DR= zhF_--!zZ`;QWw7a0OSus#LJ}Eao42Wx#xz1w1acgNzy&HM;L2T?4+@*xO=u1aFv5} zAKANKv3_x$P9d%v%@F9Fw){MEkifNKjzKUBw6h(u9xlJpu&8vBo%p4h6=!B3;AH&u zXOu+jtlgWo4ye0$lmX*0_q)yr2@O9H8J&qSJfpcRQ15W_XzH%b*LCGk{bxdj2>ad) zR$jochQv{x(RY=Ry-B89?@m$qYfCG~31unC;;a`)ZCtc^#RGfU9<`B;>M$IqgCy0PQIz)EIgwdxM%q5=%voQPOd& zmiUPbR;Q$F$YS<4Os=vx&;q0ag;jPXhMA+W-n7f&qjSMoMV#-#q*2nZ3vEiNR)G}N zhSt?JRSIUR+hf+`hkU#_HtFxLPR`<(O*NZDrLkOJW?Fxz&Aeg7p6*Yspi{_=NPhR+ zpmluvl;;)gDc9H-zWznvI?1*{)CW}|*3Vj94*iUaH3ebs@ArcdU*0Dy6ekHoqDDsCYG?#C4D5bW| zuZe4eO&jkd!4VE;zTMxm2S#|<4XU18x#s<4WqHo!F!y8ye49Kc+|@p9G4tzrT|jN9 zIBS#S)Y8|EhsvK9-L_G^njXy_Z=)TUA2mRfxYIXKq?)UF3w%|uL=nCQ+6B0jwPsel z*I^&XHk({I@Zx4e3v3BdH)1^!Oc^6E}acZso(TMecfDF)PM=*OboW*8+ji z`pkM6Fky!xxY(eW<36;fS>t&lRnd7)I1%`~VEH1!Xr7gg5o1rZeHKbS)pp->?$2TN zDxF>HB=iouH+U7Dek6H=*8U;|#LsK*40`xjcyViM!aLz;kG|tW zwV1ZnwzsgtoZb&K3}Tgb2{fuCgYuzX3a*`#M6r(=wJKn|EPm$zegYmjxj^mX(lVcb zs&SN3E9#WLstMtN-xXt+2aHt-J$>M9q&BDb?*Ju~5?|!!j2ejzX%fjH>x6IaOhEvv3 z$SYa^d#Ym_ED>=%+OK5KaJ9eqg)y{)w-I?3uP9-?qO(X?u=-)mF<bxg@PqOg6ra`e3Sx<|k=^(fQ+e zwqXBSYQb8kM=d#6FCmba8ZH5!uE~wGV%xe>MW`@4r?7Q}zn+!-9pQk#KU^qsp1#9!Wnys$*8PHu87p-!El02Tl6VJ~o&F&o-^f$2=O9D-xH$Qktv7U{* z`e!m$Ab`zoq~CHBLv_GgI6JDr{Mxdcks>l|J`iyI%-h^ds<<@&Epq?U2Z;0Ticu6v z`X||UyjPMQ$%Hs%9W=O>j0MpZK@^!3{KET&qK2vv^XABHoHo#3tW0WPm|$*#jOe{| z*I#Gk1QmlRHUr8N1f< 
zF-8^Bio<)?8rW9xIR|!X>z-8eH7m`%?CZa53i+xrw_uk##cm-kR%Pbf>N^gh8h|}5 zHp$8M|GpIku^cyKR1n_`H1M}}o(7;(-=3jL!)cW)(NZjXZ&J?{RBKe*A~cIr?cMlE zMaf|zjDAi!l0g|k_LI*xfpRWLc?jRocSvgY@{RoED%QlB>&9Ok;0BFQm3t`s2Oh*q z)kBEX3YE8wV7)-w1d>w|V$2FB>yF2f5+5O%s-@E-A}#8!a)QVD-W+ow0EFbS*OrrC zh{R*3p2Om7>y&dw6Bofog7T{TCWt%a{{WNg5Ov*nNn&!$aG6t=bV05A(}#Jn3)Nvk)?dU#1+y z;991NW4zQA6b5Z0bKDseWU%A!qK3XqxG9pM-x7|eeJxsyHSeUFddYt0HJ6Wtx+>|T z^u%o>f12<=ZVqJGe-nb^nA+J+z-!P)0b_2ZN8r;0!=MF89cr|5dCF@Vw2d^$2BsfJ zO`7?P)V4gwP52aUmCy{m(nnC^BgNxnP*W6$Rl&-dP*{#%Cl>>e)V71HTac``>C^2f zs@n5V?t}z7kchcaPPbh&Q3bzIydV)cvV3-;{t}m9*Y|Tebg5r#|2vju>CXoC5QA7;po)^40k2qLjJm)0H+@02-qRZ#wy#Kl;C%#Nju_mF-FS zOB8Y)$4^DmyoO0$3@G6r%PP~D46kBYbVkpbc}&AMlkyB+%D1SIhDb)>+|mFVh-j<= z#pB*q*zz}oI1GLHuX3<GLvRa*W){3$LY zO>>53X_U&_rM0b!L#AcK!TO{O>DEtP!OBckN0#TVTy4>m&bPw_T6H#gUjX%a$p`0RW;fW=H9wL+n+;IMel;c-a|@(RrvEuNS@UUr`A zr+uksqhPhVyW;?4RiBtReOY;|tdcyf-x5L%#nVOckIFa<>b|q-vYk*Qf1H~5c<;a!!)+AyY8kTKsY@E|9h}h_b1HmxX zJ&}fkrno`mrn&HStkBofNQ29;*d_*=px0gyRH>Ktwlg8znj_p7?b^p( ziNiKcA=~#?n8_=s^$170MoBY@0L+U7H;IeMG#KlMVUKCwFV|;pi%mR$7=+y+>4C(I zy?P1Qfzzk<2}nVtxkT8x_b9q#w?}^q)JZ_AE~a;eIlGfaA%kq6Tj~#@8cft4HlO6W z8_tOt6s|0kVJz>@HkRtJ9=hu@3}4fKJ|1n4phyc1EtNU=ClszntK1gu4@-{D6e2~X zToX%q<`kX{jCv^_3`nLGO5p*Rf1AW#!p9o|;8t1=}QDNgQ3j`KYqWBBbddx`V?34`TRcRucGdczB{ z?4W}Yil0K&B^0oa#L5>jdU2{zk|F)rt!RavDlYiGT-xD#t8^n}`cJAE+9I76AwVSQ zEQS<7mPJyMtXE@4lc@Kxd^@wVoGAbEp=Dsm5aJ)1z~{JaNXlj5z~)_CqFxu(-h{jI z;xGWe6b3(~{-q&+AN*NI>XLdd4w+gL}rv_trb461?Xa}#4}$8{kzmz z!kinW+kHR)7*yXw85tNCh&&rK6U!L+gU|@5R{&XJ6%%Ug3@eYaMT-o_Zz`3?&2PcA zZYHG;%f;6)kyU(4t*e~$R+zbZCE2W;?TKXW-bwqUD5j{>>cU8kAW=1hEqxQQASz-< zSq*2MdyOc<*o3ER+uDU6?;K^Nx?0+W(x8KEHAE=9WXGy3_iAMz{7Trz+~}rRjw!zg zBRIn*C8XiRl`vNDvoJ?;tt}B>4Q;lUUKY$6NK_0bKY~-2t2k1u&5PFrry}{Hy3DlS zN7VPFC8sL}j{V4qpI|kJ$5C-41ShEVvWW#X8ZqdyQBjoluW`0{FT8eXI+6TI?~>~` zU)?1pAQ&?b?D4`alQLZ0uOiKFHn3L^Uo z9SD)n&@OEwE+x|fi3Ur__t1sJ75gu=xOI^%kKOiZIpdhfGegDf_`r~gr*ak9WUN%? 
z9@`ueW;ZHnDd-tJlgROni3R`8mKE*YLdx6tgN9tZT@m;r5XlCtU5as1npIXps4au9 zrQi;)?4G+xrQ`h#X=mgs0@Oo+>Es6Rt8~rmIwD!RuNl>_@cNf*jgy*^38GKCmA3q+- zxc6~=JQAC~ca_*6RPZJFtGU)zT4Gg9oWEGz73Y2r91=}^b{;_R9)Zk(h`7XrZ`IR~ z^oxx385%&eq9G%q+=qZ)DPu5|kBsGs48ytp1n(IK?vPFCHn`vDAbmTi23TPvTNawo zuCBnDzYWg(u?$J~c;YI<(+#KKc#;)k>4A{3hOP9%H`k0D<6?eGiIT!BUw=`BdS5(w z`saX-VFg`t5(@(2_``qkU(=rd&p~7VIiUM*9KsK~1$hEiOaTPw3nrNsb&V4E0EYtcnH>QYdt?w%T{9^XuZH|NRAa z4|Q@KEl;Wo%Y|WYuQ3W;q$X}7OL9NjHTKYT>QG@&8NWGe+%QcfJLXuyCXXl|zXBnacT9I%heZ zuFJlf0UL#&eOYe*THkcsK}jEO8EDT>>l`&Q>iV%ZG~Oz$nqJX7gscC1%w72esxs8Y|f)HV)Xi4rO%@3ze@*ulZ$ z#&mdro-Oa7QGD3)lvOhOYM%pX?ZSrD5^QwpJ8O|%3IB{;6Z9o0&?r?nF+a&zNHft(0dIn1 zGQ1!)d&K=ekb;3{Er($rD8c@J%Le-Y9rXX?kN$7sXq2*?!bi{vOUT-RbddN*u!`J{ zl5-7AT%w9sw>>2u#c|7Z+ojIdj@l|^nx!dEzBUq&2B=br>UCO?9npR$(J-quVFouZqJq zMZObadvGi8kFbA(opgx!{|x)T(wF#`@E*B;QvCnxq-c`Q(?|9175mo4wH}g$z?@nKMsfUJ~Et0eg4)zazn0wbv*5jY?n|DuRW(e7u0HW%qJr1{y z#ptD#D=)rH!(k$~j+tn#l`C_;P2*vmbB_$YTs=ZN9Ex+}a6Mg19F65c5x1)vJ$`eX zn6*Ls_J=4v9}Ao<8RJmOVCg=(Fr#>GX~2g@rZ7N5dY4}6MHL{JFt8BcA`V!R-sP3j zW&Rjx)rtVPq<5*M)`4Lj?KrVQh!`n1SOcTL#P61HjM#)hB{0O&!mDZ_8ldymL6O0CT zZY(SUUJ=?29wy83p8zNBSx9?g*Mh9x6_m@|IY_Z}uI%2=Fc^5Uivb}&`V1L9gd~>_ z0+erAmyZH49JxW_u(G{yAQ@cUlK{5lT%-*oYL?U~Lsn2DN@_F!Bn>;Uy^6#?3d!!2 z0A;@O8%4WeB3BS3gapHmJCvK+dxb>JX9#&`!UF0YMM`GT{1bpN(@Q!G`MYHdGNNV# zl4eg7R<)*zzF}1srXCk)UX$2+jATuuI67WEP=HG~o}1n)4XbKI2f`q%`CL!rh%jYV zGXyz)X@Q~{O~Y#ltv|S3!m~a21BsJmd1yPIkah?kCc`BcEFr@Z^CPDh24)QxZS(5L z49)P;gd&!^BMOcdhWx5dO&+H@(4~7;ZMJg~&0w>;R5kp=$yAxJ;ec zy2v9Ce5A0G9s+>nmrPo~53X75=N!`-4Q!b!C#}rj`w?U4KM8}!%j6N(_En}e;aBSH zn*F*{hD;W0njfB6KkG`3I<&Lvut}1{h=^l@0?+G8E$U$OXLx5PQf3GMO*f9sW4a|Aw97c&-eSviDloLO_x@ zD9}e8;f?1&Q?MTLYi!p~yp^OoVG!z}7D$X)2P8&st+VW{Hmi-ecShZ$i{KjkrqwYx z*(}xs+}wHfz%fW1^tL6qaM^;*?*@Lw^sIXbeg1lIV4yY9y>nXk5Ki^V_a=dDut%o# z0!sOd>^HLQ0^^@B*!;k!^^ySl7Q|DWBSe(^oC?`p>Hope#HSq?3203KBXm1SMHo+D;VNUP+h?lkOMCu>1OJWX+a> zPZAY>8tkA{jFaSRJ+znVxqU> zrNN@RgsG@YuMV`0wEzmgEnih#r$&gXH4S)d@@mMj=jk8}CsjD2_r>-(VV(MB_zb=| zI(e4X(lvd-R`Th!f&=YRKpK?hdFc1xL@8fvH*5}aoJti(+VAe5awxjnckmSsgs0?a z3+ajM>B+JB=X&UKqfM)eiFjIx-1xEv@gmOhj!PkTX>togLCwL?`$pmU%3xvt1V8n~ z1sfX_RJ&yJmiL4yNCQpC($`4QxsBM;sn`%Za*p%YI>2J1tPSk$B~J3lLnC zmF?JIzgnU)2Ub(uN(p@UGp~r=fJNxjo@-2#tmVYMMzR4$tk^OCCWgK(i$`A&8?f86 z0hs80$qYb!&pfON<*d{5+o)m@pe|fjvG9}!7P z@4^kW25`a`Lnwq?>*)06IG>Ij#lLe*DBunQ8Z#s}ewv6lSH;Lc*_Qnv zKS1^~6Wtfrxz5_K@5KVTku#)fM$yc&op_6nw|E|LXl}jz8zB?+yVJhLt;H9LsxogHzEKHaNTvIJCawn#x_Y zRa3q=tGsj(ZTqn6o1&XcKFRq`eR7%5_a>{yn=~WXpfV?SHvGcg>&1Y5qh-@q;_y}{ zgXoBz9&>rC7yWZTb_&;kbud#H#OdXU%pSv9#(!j9y)iv#9(#HOl6SvNFOO;VArT&= z6G)n@X>F+dDL}>1iR4XhgTr-PNs*B`(0w1ueM{}g*H@H$r#d|!`$L1f^AJ1~`(sqb zOPUxJFSWFzL=;cM#eBZz3eMB@LU&T(0qcE_XAVOaW7yAT3;KyU)pCduF2$X}&@266 z>(XXO^!vijz7%Dm-T~zcHu~k#kZQ?rl4&+q!Qr|JRH!J@;;UQn?EN({-zO54viV2f zD;59acj=yf`ivr;`d_o9A*Gunr0B~91FGz7E3xU>ld;!@(;-};fA(PJq%5xxz2%aQ z4b<5RBuVg!M729(fJ}d03neF5wpMMFymAhQH8xAoN;a1;L(?7>SMUM}Q1SFI z!l07Vpl4T^`GFDo-0^=pJ(p4myKvm;LXU$)^BMG%zrPzVWLQ9F4{MiZ$L}G?@w-`L ztL((o8R4Ywrg=TD1W{wJ@HLvr-G{u?rX_Yv-2@5mq>gK7qc^ z=5{k?;_G$EnY{(a{c^T`C!V+WeYDQ%Qpz&ZA*;U_(E>;&4rn`i-HP@528~cgO*p!H zRGNE_(-$gjIpjWiwyUdkin|YINy^vTfsrqMX%W8D--f7ewbM&&r<~oq@+wkC07PH0 zGyZ(H8-I=}EXv5{`4$;T$GkO$K>Rod7PxQnnl!mOxGBPtKbPHC-jA z9>+XKO%okJNx0VREm!#%_4CU-&4Lwi((TWs2E1!!?fnJ(W7SyQc_}6}m4wVE8M?mE zHv0xbwH2Grlg;0k4z*!a=5z!ZK9IukN&Di8b07X`B427zza;;pbT`74O0p+PA1oKv z!N=6zfCw~IcZq)@Df;`}f)JckV#Fw!P(k8OZO;PWE0XJ%DB&Czk(G)s3@zLp)+nl% z0mfIcT2}}j%rZH1Y@EU3x5ND;El^NPqV^lYb8;*(M=<4{Pt9q&c|0ZxoBSnrXVZ;n zEuEEP4s9hcSxFG(KelM01bZxuA#xAI&)d{oTpy`~u@<0vu4bg^j6n;ra4W_SCnQ<4 
zHKXOg5%23Qnw&`m$V&B$(pnuAIx1=jTj#TjGwS!CF|o&ZsP;3XikWnwdBdcv&VJ~a z&Q2^GPl4ulQmdzMZm6<~ zS8ZDEjz?6O6RgUmuIwV^n_1Gt=)Ia238G|bMPJeKWo&ffU*rQ#sKEuLH<-MeC*kaz zF%pk#sJ6-{Y+`amlZm#PC#6oSgub!N&mJ-q3tw`1SljjI)LOplamU;L?09Nvy`j^= z=42&27*?;qyT6wF#y2;58TpeAU~f%1@7iUV)OX-evdYU?*;p;3xpB3hk_2PWqqe3? zps4FAE!#-_hCubYhyK?aU0*&eq9y)ar3qL($G7ZYUB2>m^wLt0@-hS6D~>(36O%kA z6jcU5xs~`^-{p}22zyZLeR&!t+!tSZh9k~EN*d!3IuylE4;%pT7v`VZn!*5V6s3B( zdBc*G&Az?e~utb{a{2;C^^jF>=aJ`N5vE}k8LM8+2*kVMO1iJc1eZ-JIz^N z4y*)k(j$*XS<^0HinPpVU}x781!6CipaR7y;sTw9yrTNUbG?FQE$vzQ8Ij?i3Y3iF z9(fY=5J^@F>qVr76LFL|PY*I~n_tW7eNbGz08^eu@<* zo#3y-1QXUE02rOz?&)?3D0S5tWP38#ARUsxta{S|;d z1AR76n;y`oiG!oLWd(`So~S)Z!dsyFcckSsM9qFEkb@ML=awq|46?lvqZ!Hm(tmDu zC!}WPh1>Ib$p$H|k6*mA(V^V$i5K(h37(O+c)ztJ9y5B~5fJnhc6CU(c3vkkL#1$C zL&sk}&P8wQ^i{bUU4If6s#|&1C;~*4L;fzc*B>tLTYbjWdx?6QNFHKoy@8Axa$H?q z#?%|`mm?GsPYl@I6GyRv3I3bF+qEf&Tc^amxek~ z0Xz}C^rHey5!AH!=r}kH{8T9<%4V3QA)btT>HOJ_e16y>m)6K=_HPi!mdoS zHTa=cKggD5^94~JD=9hPc?;dLuzw9b2~QL9c02r z|5#yxL6Z+G_;x8s1flr&L*I{zve8G!wBq-(7mptIymQ@%()g8^7Uh|Bs~%>lSs89_ zGW+ede%7B)Sr@XMAD&9{*XVH-$tr)}*gDtCZoE~HRL4=pvbCpX9tCJk8#^=|el7e? zD@pzx%Sb%v01=eC$Q}!&rK-OQUth7|DQ`otuA;XBZ?@nGwPq;6uztw%VU_Cb5G0nI zM#1lW0oK~=_>tvk+_=P^UkLRUkelp3Kdx|ltn+>lVl^|HnRP zszb;Vw7SiTf7jHnpq^v60p;g_7zPRq!iTEdb|vlia_ocQ#{RS&Oj=nqi>ky8%h~I; zEQ|Pr{a(l@$q(nI4cJb}R`+OXtAj!}f?Zx`Jo&`8owHBGw=D=<>|9XOFZU0b?rrcU z)gCzar&=Y5D1C%dW*lm~XvIEQ?QUI^O!8M;H)~8>y|$BQI4&MLZ8%ty*3J|At(c7E za51~8JY#Qp1cqLi0+tk_3Y4eBH1zfpU8VVw*MROq|G-a7=# zVW0kp(5%PoZ9`^$Mm6S1iYW8v;Sc1vqA><63O@Yv+1-`Eb`8p= zjQ2-*%Gr+vRU(Cjb8kZ|H5%o=vjR1V?aM&GtZCC4smOR&Rl=w=AIaAgs?LA}YKk^uRac2ET5LoGHj#L-<6u zmOphbEig;IV#n`Iy_cL=oCj;R#}!VM8=|QpMk`u~Y?~7zxW_u7VnX^zcHeuro?dcq4U&>Jn=@;rlR}5e>zQyy)W#`5I?D&D&fTIj@cVv!+ zAt07`SgTbTCV?!Kw=JMs6iEE`YgZmfInR8_#e3qDo-Bh?k5ca#lG}4G#lg!E^@W;d za*rZdKfM9Yln|A|>626Bt}k^$hz?Q$i9)FXHFZwD!dGr5mf(RH;UO?h09MI2e;`PR z`-6$jnN>5xy(24ZH{|_YH6_~mFv~KiBy!mW{#oH6DOwMGN$#n2L7twKTh!;Fhgyti z=m!K7rIG@r(zw*T7z>gDHIW+N!;Oc##O_FZFPMpJBU|{vzjtZk(nS2l4BVZ0x$)pl z)U7z2A~gXM<2H;}CLcmrNnO5o*-Wi!)1Ip>r)m>wjp0TJ; z%!u@jM2R@U;rNX_g9#U9*sV3}toNi`gryxrT&<#6*T3y|2hk-TSbcd8psY}={Wwr$(CZEMB0ZL8u`jEYsUor+a)Zr;7WbI<*7_IdC3W3|=VT7RB4 zpE<|qW6VB!&kPKw;R3V#PPrNErCzn5wgQp;h>jn4%2Uu_DIn_*wyskDE@b~O{pVI~ zQsX_}9r>9W<=##xJ+-ZK(i%0#~UJ6w6{kY^&RP!G+6wX+p$(4NEwkQNpY zB*d<0j3B!Bu4+s_jBPN^1{g3kNjrGa3f%FodOOlgFwKp`0rNCDhY!N0*KwkDaQnni zUa>grrg(*1Sp{xv1t#iQJZf2f6b?&~zIcV3r=Ucx&+dBNcL*BHNl zW05?oiS~m1IXfujex9>M!oMaEi?%-Gm@KXV)i)xL!1dCV;g#>FnDXjQ9x<_nd`t6! 
zG0A?Ukoj3o80$ty)xTQw<(R5a&-u}Q;(Tn-w6Sa5} z7vDEcxOMC4iXOA1mcaZlXFKRgvvR}kxO=fP!efY{oDlCZsH^-b_@UsrxmQmVL`g7O zc&0TsRbSJ2izJ;FVmXdGi?Ey!W-t+~+XD(BX1Xar(~Pm&-f)_(t)>1V{FAi2gux7C zxurE_HJ817Qwq-BI;`1UkYISiosD+s-1``j!FAx?I-aE(-rnZs;PCrE{0ZK9>z3G7 z2^oK?KY)|bV7ytmAXhCZp-YwuREqL*pWRGMgqd{aWtgqA$Csu^+E)m6GJ}W zP;r1o?MJcGSHvE-IzTJmZ8EZ*L3{~Bbh9d|OUVjE-)Z3dUs~{OG&$7K9nXq?OcJ{}ntP&0 zn;+H>y%M4VKXIY-tn5SFW$mP}J_SH48>C#8Pd!0$VhV)WP_1`k3zYM{1n2DJQvp9E zfxQ3dyHo8A4hUI3t@B0vQFecbyH@W&<@82Tx=pc9^Rl}o7oL3;Cyt1{B68tqQd$>H zU2IYAhS|t+?B>%7T3n~k$4I8TfCxuetu{^ju#v5N^|t$*LRuN>Y~8!kB$XF5074Rm zE)I)}1`TZv&67jhWEATt7&{gOUql!x8Ze+E$)0s;_H0{wu6e(3OVRGb) z=yCCFQPntZ-GyPL22lyjyqQ;2rzbV;@Rv{fZedP`vI)IUB`4H#s1SNRx6FEMjH#Tw z8qB+86_p{XiV(`eHwoEre`9hu^k&1143Sq3%#jGMmVmoLP1;cGJ<+vM=b{gZJruoi ziF+B<^7NU^b^?H@4UtsLW}aSkc*l0xl7`D>(UOJ}pG$D?+9DC+~r9vzwqoOg=YJ2w~*H+xxSVDIzAaW(^nRF&wMIY znOp-h`n7#LMWzCi#UBFD6^eud{CA{>R0SPEp<{T1ir;XI&{FqBL&-T4FX8NhmArvc z_T{^)urOO9p?D7pmVXq|IG1lxmAJC2-dYIQ zd23^2J)&=U!L*I(akKo+kZ7vWpstDM;V3t7vgiWgD#Jvadb~1iCoYbOp1^fT3vz@0E`=$^FsG>&(ufP%*w=kBi3{ds0WWDaeu&^?iRU=SNL^$b`$@?*bATV`%un9 zruZ*HEn`HQl_2FzCSlSqsTkhC`a z=7s|8*8M(E=-ys9e?c{|7b73NO(mj;{h1Z4!ee1swVgT4wsbRDPd{-PID?#{WyWQO zAa9xbh4d@nzXVbhS79(O3w(v0QGUHzm%OB?Ou;N61@sc^byd-p{_?d!eyuI?r8#s8 z5TB(s_~yC3+lk*@Wv9+vbyZe#p*bBv`;0b--uEpU^}7zBe<`hahiJmlGxV&Bj>Kw{ zV%I;Jra^sSCA$;v4E==JevXn@fW?TsvP;WClrKmTmg+7)ZKYw9QiIpxPsoqh+1r4B-D*GIu!frve6d$l|B1mO`Tt`E@;@8~ z|Et;Z)W1Y%k^ICq)ZRb@_EvH6TDIgQRT-IxvLbSLQ<-bXWV)#IKmw*h1o{Zg@PLVw z=*z~t$Fsd0%;)20+B|^LIir-~lp>ud9d&D4A*^dmt89ta`*E{%p;H1ZWrW=Gv}Knv zo%s`~RbEm{uoX((^A2VoZc|L-#n-ZFIT>(M5-$>$7DkdxLz1m8%rQ`Vysu;M_)=!9z4(^Tw9OFYd z?HCZ$1j(N5!Ql5lDo_djU%ZO{m0eJqqNlRRhz8ic zR?|~VKzAIzV(8@59>va8axfKiwJ>z;!+oGGDa0+yzARbAf2{Y~MZA{a$^#787w!@3 zwk$m;O7wqcla@H}%U@NPgEOO!v0fhn zvuUDpqij8?#j1AiXt0rxSN7)Wp6~KQ_7Lt6J8G7*J?W2>`54Dfy}Fh0ItW<(6Mkoj zN#jIcX>6yTDhaPccai^)lIu+?BcnyRtMh|2{>OpiO0=yT_jW7uX?Z54h#Ai*7SXaA zt<|^o>FQ}l`aMxpib2sbiC@~J7t(n7=U9dd{sr*=ed#oA#6T2${HLrc+PMg*Y}x^T$(fm28M!(*|1C1{pT5EJ?hx$m5D4M`YWV!OBG>n! z;Gu&U)%VFJ>Gy-TBGvbex0LxD)%TRey@R63;G{QVzx2i6#qN~F?gMGe_JUS&vMg+D zw8qHSvNBV05UZYl84Zg=)a|^@7agd!P3EYILjWga_{$sXclHJ-t6MumlYsirYw{`1 zNBXw)otv7N8X6m#0s)I-6s*Ece!~EzNia1w1pdqMcXnD&wdm{2@&Dtg{Nn!sIXGJ| z7&#i5Seh|dxqC5~I5?Xz{O`1%|Bc({r#LPT$^_@u3IFQ6L#PeSn8QV+T9Ozgct1uZ zw?U*vj%}7x^@an<9~dv3Ar>eooOpX~A@E8z!w|0kh~A(MIPKx++x5?(glE zs-mZWDul+jVBgiESLlnTa}$l>)a6U73fwpXhK`!_g8U8V(CSqFEU9Vz+#Yv#6*3^4 zq1W12WLDDmG5m>Tkw*t~zK)#tyo1GJX2a)5{_;-Y;){%O5MdtY_Lpq!667`;27JRM5tW>3gXN1U zQSJ)x>FXa-TmUBH<&NV;osIMN?90nFs;d2P=6qeBVYgU5ClA2K!j$1jHKC8zi-N}F z{^f7JE1^SNKJw;Efp?j_Rt3EjKMF>GS5`mNRJEw2iCA?+M$NbR8t_Djcw@MbStbYo z$OXNUY48$1(M=Er+%nWYFqg=DhoV)J4!6R*mM&D{qniw-BsZ!gOn<**-;pGB^cB)O zoB=Z5kRaH0UuQf6>XFmK%#B~CQ_ddW1rU{8Rct8xrnqvpRk6Y=$f=k-H+SGX08cN* z%(%rTyoNG4nYnSEXj<(0O)jSA^I?DADOZXV>Mo^2up0Uvl}GlR^T8p@Tf3|sBzAGQ zpdR&a`$1EV94hy(&`SG{!eIY?2>IWbqJD}K(xOaoa>s6d0vI2Ia20c{G{|AFBvH+- zOI9qcA`U$FWg#C~umXMYDEpx3;8gdd6kOw0`33~}Ks!+1X;vx>n}g}VUkB$n&BDD4 zr|=66_zR>m8>GI2jnn0sKeE`hf2V+n`~1!z?8N_l#S(*hzty}9k3m3r?i^mCZNEd` z8jhPg$MJCv`iF`HYE&t%mg|$cMgzMP8E)rw5ZXESUNf4-R%*>^`3m(3tcH#i+S|sr zklt_uxnomJ-;XY=n3^F1(dt*LHUKgCjEt16|z z58Gonf;qYg5}#4^!}{_?Mq`>UM>>ks_MghOU(CS+(GgV@xkWpYavU-SK6AnU~7U5NWD&7J&X zs$>7RUyYpTe^gn2Z_*xh8x>qNEPr|XYye#l(3)apg!Z7bRCJ4S<G&m_u- zC4yEzij-Ug>M-PptPM$TC>e!JCC*c@SA}*}(H?@Uypd$AEry{&%|9Fyg~v>d!B}k! 
zpG^IWsc;W=vhqpU{|Tl|?fhHqzz`(0JJ}-}zd2MHpc$^Ga(AQ7zo49MlfFzN%w}!a z3Q)&w>yoH%Fpiv3A!25veX6DB2tonvx?!xny&Q|5$Bu}w0NB61&r_k)DZ!k_*2q}z zBvOA0(qhKv(fQUK8NjAmF_pS8PAAnCaMtm{9P%QRJ37ehP?11S}E9%*cwGG1K*e~2KmP0%L>37{V1kkXie zVxC9`r|9k)6HA6RLLN9G<&$O5x?LA34>*B?DU5!}v&HM7OTSCA@}xf?1O-fh)m9=^o8lqg0me+7lTHdO=!Ez>Nt8_R`&m?R`Yn) z5oKc)PjS+FSB#4*Tj!Ml^bwF@HuW@d+kTMRq=(w$mOQMXStsCdTJjhxw$UC#Q(PuM zYEba3WZ1G)&X7peUpNYtvhQ=uwDD#7MvhoTy!0~a@*J;CvCB?lPyDE~7`1-&DIH0h zbU5%0+n)*q+KuJXbf?oTw_y<1s16S7ZaAU)c13%&QJdSbCdHBKG0%6L>l!v!89ZnA z#6ONGC6id*AljNZ*Jv8iNTT}^=?aLK92tZ0j5n6|QeR-J{((fw(>E7)UG)euH$~E| zEL1*1eS-50ifQnzDH95>@TzUD6C-6!Qx8^(4!f#awrWihd~?kb9NMlXnBeP|Z*^pY z==av+P+s<=?ugJy*rAMZobUN_(Nela&fyhg8cYgfdcsX9O}4^u+6@utv__y4o)I$% z6n2o+4W}Ph?0^VJ#TzWdj^a8qa?Vy{N9 zPN`|*-}3K(VD@^g;NqoYHrZ5U2Jfl~weQQd$4Ljp6@! z>LFL%PGvz5Y3rLZ?AU8GNK_O96^xLgs8FFI-fu-S!x6;W(=6QyCaZZVdi>Yrx1E1= zB`)peG3GHq2-)PN_ypX|r3 zW*Rh?U*$~8bNrc&6T@~Q2(v!MG@gjpBZ#4i-l2A&TW{My&7#mjWbHQS%4e^tOViL{ zk?5M7f2a;xF&>l``|qp{BAu3N9D1G8yS)Br$6+fFCt5r`~Ee zog8z^9kvvEP#(~Lu(4MOSC~h5EH}Ya2z@fK#d1^PWlTn%NrV7T9QwPk>K-xMtjAXL zLckI&R2su>pui#5jtEz~s6`$W5bGq0J9_RWGy?X}vgBHwwl%{fY=%k@Q0sq#br)2bnxZY(B?->$V=r(<8>(*)ze<&2> z5uAjrKRFooYJ|vPK?v%G01tQ|n;XT=21*U+We~jqQ4P4Yi?k!}!eWsu8iiilh0g5o z@1kOn>4rKD96QD{f;NZN8$~&CMSU_4pr4Rzgjfx5IHq6o!6DZP3+xtK62|WW@2X^c z(+w0F&`9{2n2n&G+;>3aefta91#mnU;eF|WV*R5#nBu<;9~9ipoSm&q&Hhsm^goN{ z|G8IFsw$&`DunbwPO6nuBZwp_IK+_+rs?$rA_`n63Y}u?w8OrNKd3lvzhPVSg!l%X z$Ll>zEp)S^RQ;xu#s`fUh9=HyE6{uT(dpw}74Y_Ui{9rZYt>%68wY@f_h6H?ZpRdY zvXjn<58ib}1I~#gjzJ*fPCZdB(us7Dz8{(mbSIlWV|0`n{ie(`a#6s_5>u)}x?b?ldz z;^BDWs9ia|#0QdBmt4cLY@08kkv`=*w{7B%tD+7(}1m_{6m~T4FAuhHz+VGzi z{nw$cB?99}IZk%{v)$kD8#RAgg#L-Ka1L$x}N+D$WS!O3_3Qk)~>=8fqQ~qpD!a_h&V+wZ#_4e&G>|~1V)EK zIoxI_FA?*zE{xA(>eUTZKF)NCuh1J4_ohiv*_aNp{*o2fo9|~}m59ozuNgfxzRS>r zSNVxb^FeIx8ziG_KCjnR7B<0>gz}7l3D#R1q~ssvx-AmBQltwGZ|RmD5NqH$VYrGV zF>cBBAR1ECgIJ_3!A`Dc=tNU%SnZ|&*jPd+W631jQZPl~gaMLJR%9u>w%IP3Srsa< z&?LgS_Ktjvy1-&IZdDdl9z1TTLX-CIxG!{!i^Sg*V(8@BS zkjnhDj=&hhrITx;r?*&!(@!+uPvg=8O2SKftFcIw+n%Y%EF#PSsfV9iNyYq){g3tO zlO~&`k^{|C7kQSpz{ULH?qV#8ecR{y@AXPYc#J}esQ!)ubXJ$hLYHC2RueKa@_7|s z@DM*ah3b3aO7z7|SOGeAdOUKw1irL}kAi~nc0h?0m`Y{~_=4Ex)soqj&?pr;RoaBY@%4|JE*)IRc5Y%*XW4uKyA&F$K% z!MEuMlkTdlGJ}kF5YH@I)gys0rEZo=YyzoN6Pw?v3GN50A;aJ+xNlsZb4)3t5RQ>? zWFI2*W`r>ilj}`kjT1)1g1A4=3+!dQ;|hJT!0uLJuW-zqiqI#x=}+I3XKEUKoZ+o*mv74NXrbR;Lx~U&*6OM(26i8-)NO|ClzoK^N~$;~ zuLN!pw(ME%;f2z1r#3@hvI-%=^z5J?I7ASkI~P&T>;UFt{B0i-6ajzrZWk4K93Z|j zlGZ<-u>Yo@{r{XWWi$6LH$(^f|2S!>Ux*M@1I@n<+7!AvG>{>MxU4sf3`Mb$SQ0ik zH4PJe(OBm&$u_BUoZO!M+eP>X!rGV6E*#-nCXV?}+4A=_!lhHLu5tA=PV(e&mn*K< zjk`{d>5scB!T=Dg;Xg3uXp_|K<%V+osNf7}lHg)rwX8duFQzbms4$c{lseQQ(m4_} ze#8#>peq*j2=*nwDaoVUG1e$_mwON%pfGqcfHlZDI11N>U50gH3swy0fyED9ob9#Q zKJ1un6l|0fG^RwV#yflrHvxx;O~AyfegofYsHyyQD%ETamBe7~sOLQkhzGl9MG*d| zVRf$?KVi5~OFu~u+tgI740i*8iN<_51qu3E3Kekhx3rVBnK3g=**Y(MhhyQPn$^?L zuqDaYO(L5olhkNwlF~}5%*kW@?vY$sR7wFm>`JEK%*|!8DlT4j9>zM?`e!~pCs(9* zi}bm%*DfWt=oGIAF3(xhfvlmaqqO{de4lGdof&(xNTeKhF8_6M@ocsxlRLMlV@+^7 z13UVa`W!VgIiADdM*a74U#@RHPrb<$=_==iLDroU?X0@r^MXxNf`mRJljtXM&?D6# z>$z1>^QaZcmnN5Iea0{FP7HVzgE};JHyb(oBdOw6XA5ziA7Hyn#>3L5f8gDJ(aVr+ z%ED4MnWsU`-wA@ftBD;=xO&_#-=CYRw^?lY%ii(|oRrA*tb*|6-T$x&Lb6IX3u-Rg zj7m){Q%Ym!LqDOIBDpm+h2xXMpyM)8F<__puJ2yEOxNN5%U#A)Vtm|rn$C16Kq|gs zg8(+i%2$92xy^hrJ=jdKCbz7&*)Yw{s!S5w;XB!4qPhWYP~tCdO2%EDo$i)J!ZXqz zBI6X~xn0Tw zi=X$-Ev;wQA`!X=sHb8Wq;Nul*|lF0khKIvmpj#mmv)C*LE$X@`ayNy(0TQLL9bhZ z*Ci0)7`N^V5Cwj#WndGMKv}!Sy|;|yI9V#fU%NV~HLi%7&{lN{YbOcS6LJZ4NAa#| z-+{`lLvjgCpn*%e&c+M{Fz&*`9Fdzb%WG{70Fg@ugu5qa45ER1U@|buiDUq*>1N?! 
zcm#hhUT;UdMSSo3xHgEOZHx}xKYAlD#?u&xy!c6Qs5D#(_Fm1|6aIb2-*7vc_>7b< z1j)QI%jN!%QDMuHb-A1Gn}KePOrkwUWIS=Y3$Fdz;PkIXA3&Rbwak4`Q?QF?MD${* zecm-&wZN>)_bk4l$_LOKmkU26J|(dq8p9u!*%S!gT^Z$^U65qd{v%%ODDZd%Gu2)J ztdfeu;q)&&pE1F!Ll&)u5KqM+zu5UN%k7cWMukDG>+`e7tw&^Tgk!4~Z*Q7ihPJ#h zd5$cOBXhkmeP(Cqk=HH9j?$NAwus)=I@D)A{%XFh7+Cd6e$}w$|5(G){T2%_jIeVk+We&J z+Vp)XgC4oUE@TS>uPbyR6^mY8#%cW7z3#Z}$fT+DOpect$ISb5QE_p=$NM`XP{=HZ zCr@A_&?3++Fl}@@2{DPW&`?nX8M23%FY#Vp#4E8TU+~FpQG7HfCNw<+Sp+8oQJ_8a z!^tu6&*4C@ND~zA4o?|jPz~G;PxvTNk8Rt)7%WUo)9&~Q-R_CuiWnzfN|LbO%=5k+ zu}0W!jKupSy2&?8bw4u(A(@Yyl*BQTnbSHb4JE~q)4`xNL45t*lR>gxU&;w8;uk~f zaKeUS?5xC%Bn2%I-EHL*N*3o6e2nA;Wwwc4PtQTqn!`Dj6xiN4T|xST18LN*KSi{r zgGSM+0fZqVIfrKE$Z~I{E^TlaG!3JXI#qTo6P)98sg!!)^&__0VWHZ}E@El31b$g# znHVxPSzZzpeEE_cUE{o5XMSu0TzLaUI04K!23Ro$dQlW<5SL>0+iEXe)%|n+z(Kn? zb=X;a#0B>4-8U2x$*B7v*2(61O7)6FAu3t@^V8O8TFut#6s)?nHo>FbWljvy!2ZO7 zSxopeqxP0L#qxBi8Hp4r($-d5qvkdsXD&z9QmrdBRDuUSGiiLTs=0A~;O3|d<8vbE zqVo%9)YuSK9CqlcB04H|Q>>KnMV;y;s?U3+i@r!c^KM6ni*7&)XWTRuFBNKz>X}CI z0@IC2qrX&F10=rHqk(8wb71#qLZW9CcveWKOeR(-8Q`DX$?C1P+3*yiS90sGE7k#Y zLLowG6>x06TOUZ;FyZN49Z=)~tURhZ?dw>meP*iJ3V>F5l@$%f3u{2XpOyk(S?fRw zD0d`y2<2IYZJx%QmTkRCBM<7V{^Th1<82k|a7f~c(7gkjLH5U>Y>Hc)VX_YE@9DyS z5L5QZXn#G*8(F7sM*OGfFA#FcUvobtzV}&}WL`o0-c#tC;ns?-C9H~%VhA_tv0e&J zhT62v-Ik+l3OA#y3yXTiDXtJcwCfFH-lgRKdB0a2>Z2XbEb?_A5yp1gLFh~nV!&`j zjy?#0CTi+XjVlgOC&HjA$xJ>E6G_ti8d_;M}TOMcbb(b z#T$hY>o1l;QSby^PAO~Ffuhm1v8MHQis&UI^3vaK54dM-f<$)S=}ygpdWy*jkl!9; zKpIZR>eo1EZ#Ss+r~uly$;6kuH(lrZP%#P$mGC_Sm)F3!vDwQQNo5|D+uNaAp0Y{b zgJViBrDJId`eqT?A!|p1pW5#ya;ro98dr78H@!=BVL@53kI1ZwJ&1T;MC#OGi&KvE z#LyBV{&Rn_-V$c{4bU9kDeo*dz0fH#mhuX*`FGtfXM0I#_^-$X^-niC|BWm77s~oO za{aFut5ws=P<09Y!=8EECzJOZcGAw*Y$p{Ux+sBEL4k=daFL>}x%X5Xk?92WATw8A(cGfTs#7HqC3Mv-Rhjum4- z>H|GsG-ezsiwT?udkvf4dPMb{*#w6t3&0K`Ad<05Vs5N9(-6B01Gs?GhaeOQG~yVf zn97J#XE8UlA-|pNcT^)MFAOt699)7UoEQ>2F@9i3hh%l9kX@PQwDK2bi> zc*g7=^ZJ4;(|Dq6t6rl`d*JutLLoT zacI}-7g$-xeJTp(xPb@%G=6j_)ahTBz1VIS>*tZZ(DRi;?(=_vjsOW#)lupYjH|6M zE?=wc^sD-@s<+voVgpAfg|-|TtRKJPS4JGZel&3Ab_&Bw~*av*B*t>VU3tB-KLb7hS`N?E55^>J=xzi!dzkU-fz zu4tn$Oc+fLl0nbpKK*39QEQYAq|1&*`g>JfPiY;Ui(d3^y~~%qi#W6Ob20F73Oq4V zLbp20;iM9k>6hAv^|Sh#A5~lG2C7X-tl+ZWo%vIFb-cXhK`C2ed234`Pu>`!8qqbv zva?wEe{7hZWlJP3An0X@@{5}f3NwPp;8eTFM%^Ik7FYj<##+p3rT=qi9nthFAL};I z-*^nv8K#3ek~eddiQ*)ukxnv2d!)px*Oc8dcMHLxuReLrC$Ek^;?56q(rduXl|Z-V zJCRKaxG(=t{j3F*eq$Nkd;}};BF4yR<#o*k5-TxU+-9#^-l`A6^W=KZVQ@XB@i@EB z0Pd9m0-TW}BT45R3HGq9wq#gkMc{SH_{KRx<6{1Skw+j@grJtB;8bujkW+6N5Fyor z(pdibZCq{n+fwmS*#)(1YHG>&nxE1WziP`Q47K`-6XE$FC{#V)wbj~dg<$PfaeYZ! zNok^?N%Z+fa#&qADeQHa7{pnts;VPmJIIj`*sX24&-H9ex7lF??Hrch2+ml+)B;SR z0~fI@ZI<0eY(B(c3{Ad?$AeHz#c4Z4w9L2a87(af)niX-OB&)CSw$@33=qYZV6*C* z(OQ=E-t3Vih=okFkh7gf&*(_Dbkd-yhSMiV4Ft_bpngF#_C3}h@1=h8rK=0YXB5;7 z*`$Y|;q`+hUa4j5m4p?`9kIy0;<*vQ>LbF4_{>F!;96F!mPGBvIO^Q4NbezyBXakq zq~)u*&Z=xnfo^|)tc99o&o5V8R0C{4Bt*^;Spa-N9|T3Pc5;`xiVlY*t4O(UT{9%A z8U9V7DbWXsf#s37$~zawkJ?2s4}Y=PShUTGhpz+=5@$53L!1CcpB%-3`mc zy-nA|J3f(Gc{YnFtIY?Q94u}tz^kRy-_L@M#GnMc?EN_1GDxeK5=(U1s@HvMbf6JE zLGb3aE2V;c^hKGzfm29rTO%r%I;tM@54iwW5!zAzS$l?jqnsc#oQ!9s-h%c6zO)_U z9TigAi?6DyP%YhT<*6SRmckC6v8o8}l~PEarLDr_Q9z9MWW!&T-^H(>N#NOj~2N zIjvebR_}zhaMxlU_+rd)(Xz*Oz3Wr;ReTD@E=U>a`HK!K$b_r&QBxalu!$FWFDN zrBLUPOmozk?Z275h#fshrl@lT3pbgA6!?PYp{UQ(ZW{Zg{TSn1gs#k#>Jz*rP!W!d zq`M{h0;){+@i2Cse#TQc^5qT5c}B$@S>YA`{5djnO-Axk`IZm4Ov)oITW2Blpd?+D zapo$b#?DD*Eg-QLA6urERq@ViB|xxDAYII;^Z>5R7mW+Qq;q=DnW{SaJ|+;+vN(t! 
zebOn-9CHz+hskYK|5A9JmWz#*-!E!!ypE-jjc?LU%w8tFZyjwoy~`Q<+)`RVoLVu$ zLTNOzPbS+xkpa*+N%KS>_JlX{%x(IBU)+(i9ncYd#y)4EB>Xmj7m?08#T|kFSo4OO zVRo$as~w|jT2;DqAezbL9E)FvQ(4qk10MA>#jZ1gG-Re076b+33p**BRtYgX)^XXlW<*_t+Kq7QS?{RgfaoQYnH?AehZM#mVk7bQw^gP>-kylxB*6ZZNrT1#YQyZV92j|}-a zz8uybSQHLpP(1k0@?Uq)yiU42^Zoeam-eSi;Kv(GA9puRfkq&ap>Q7`NFL^OaA2D! z@6UZEg7`bn;8A1(2`}F9eJTQlJ5Ru<3~$_xXXYpkum0f;PW-Z0w^Ee!eF||*8W5N+ zu3#m}TE@!2P#zG-B*xGd(9aBYz!8w}3QixfE@q!GIdx_&$~tI;um_OLEJ0{QuIx~V zX5f^pF5!@{tw~WxBt5x13lX?doIJTZm{O3Q5bYOzkO_`C5iL<%Jv35BR8ux)-<;eO zxouw^*~3tBXtP~usHa_O=p75`2Ixf;Z=fL}JmZn^F_Lp~h~xzGhStR&Au0lMb(zCS zaLDO|Sl;kQM4MAcgh#^8POX)7^=?CZZ=sJWLAps+j?}O>Ks+^cRoU{A4W2ET*kvhZ zm06AMml-v{_6)+WV(b+uz1hXK2WCP?d3)Aj!Vi!7H|`^5wVsB+vNoFOJxUzHx!tep zh5<%(3>ON#?5LIFNW-aN5xp(V2`^2H^SdE$p{%SIkgVf*f~@^{gGtF)2L@L6-{REd z7KcL-C@P~AIf#Q*;>URF_6);TE4l_)nX~X@X}^-w#9vKm?QQhN)AlI*4YzJS(T%MA z(I2Vzt>u?kK8xpnaiun8r7E*@Xlk>Mn=+Q|$7!Eu%^5xGEX7!VpNnxnkqQf$C3|>m zmBG)&!*H+07HgUpiJGH8w1R0y(+^a;3!E<8xXAc$w$Xy2rg)H4C7G?(5f>&>kiJSs zg(kC^pZqyax^CVq4M0mwq8K6FOre$A-;uHO25iWndXraXi4v_Q{Y*$L@|7~nP;8b~ zC9x`PUQ9}2iiLiWD@Z|Lu1YXo|2FO}*X-m(X~L6y`I%tW)RX_%l?si(OC>MfzvLXZ z#$bb?^^D9#K?O-gRDZ<1pp|S4w)Ea#bO{?-zWse~aLj0qw*LY^q~Kc3VA?X zPE}zwfRy$6I%YIplZfCbD|MOTLgK28hoSovc1SnDGu?gGVz4>xIdSJKiKl|c?sc4o zoRHKaeS9E?`R@Jq~q;|JG4!{c@=YB<0Poby=oHxR?KF>wp6 zHYI!u!Tw&o$=gy-AIYvdQL1Ko0*%U$=2qxP<3#Q|Pj1i&*X zHouq~C3irL42x2>71!T&K}WJETn@3+8;Ocz30YcIx_SwVE4q_K^VfOm-U1X-Z|YPp zuSCI%~S<2FrTTbVc$~cm)@utYKUP z*vyD&|lYo%*y$~D8?U)u-gE+ZjJ zmw=sZYJMlTkjQvJ5&w~!^{J>vFB?2y#pCSY*`Y%V$c_E=YN5k7P}^5hE+7>CDMR}S zjOUuleHU);DJF0;j0khl2kfUr^=V*d5^!anI92Ydnd<9pLimu@Vvfi`%YIY;Fn)Yz zyG{}PC3U2iBIa*C%SZi$&}nC}2okb$vZ-eoo%Z6y8XI1k{`!TKn5pbs-hHJ zPC0j#(lDlI1qo!@{5SMaW3jq1J4yQHu9hOS_dj{7WMtg*%74UkBVD*$jwWkHX6PLl zFbKEx=CX&^alQY1JWSmO%LA0|cMrwT^yujaqtlC3I@L?ZichTFo&9}T{n_yd2$APv zqs*+#gyoYbNad)J_nDS)+t9`5Tk9r7IZI{E)ZqNu@p& zUMqH&Csr~>Qmc^GyNj1*lJwA(HfB~OIQ1`0*6VPxi*8Yf_weX(9J*3FKw-~1L=2hb z$LT8~LTvpm@$#DwINXG7ELP^Js^uM`cqU6r1vM6=snU&PM!1vf6F>P}vh`;JI~zIl%apMZT0YjsZs^`h ztN^0Muo0XZPRl;=ITQne(*d2HaNXIos}8jW-W(OCHZDyF>Nn%Tzc9 zePPt=?(q9jfSBCXHEzq)leKVxueUKxUmksW0k>Ue#r%4r7mzzm4@ugOoixsny2}lu z&Q#8PT2Up|pJb(DMuS})8>M8N`HMI;<;=%QT3wb*u8_8E8N>o47VW2(mDbg3MK7{S zfG;cF?YU{)*t<$pXugt<=5N!{85lxXbx@pp=uEoO;OGXl9w+7yCsdiD^DMV|s7!{{n*aZ+{r|%LY=Wya{OtNtk zU4kyQgq5Wxfz4KeaJp&b8CiPrXL>9;5Q90Sww^C@L{q*CM3pf428p!Yvgm? z@1LfZL@(!?in8P_t<|Qma4dT_8eVpPzOP!cjTl-@99VWG5}5T)RMV70=tq$!L!c7; z%Vr<3%4{|$ETy;fk-IsWi+{6jkjl^C|7lu2=9+T7btJl`8;gWnQz}H&4E%N$Gtwx3 z@XQ<&{qV&ZO%F1|3qDt`Ppf6sm`S5EtDZxN(C8s!X*r80A*3$xMrg;?XMwAoqrd7m zA3sGgtC-?&@ZuUTa_*Y*v@0T?&-2&89SjhD+k)^T%c1Me7w&@{TttvYl^;mGuZVsLrv?RtijKB^EHZy;#hjJIB!9p|*y4 zd%pe(rjxc9kQsJ!6!K&8^RL_N`8z|$PXutDYq$s%u-7wjo%P}#5ZuNK9GP+aFg?-l1( z^&lkBIh`w?~zQGmZ9D=>0;inxMGJy-&Rst~1?XK|8oVJML(q_T$5X)|lgY8?pc| zJihk1+=7;vCcxu>3S)r=%xQ8il;qxRqK~p-iSJ9#6*-aeWWkH&-m+59yK!GA#dNg1 zN8Pj7oivv~>(L+L%N)}yCB$^)g*vhCVfvBM96ek5{G{l3;NZx z4cjtWN|#Lf2f8-tf}u*_UD$5k`L_Be=D$d(_@SMXe?92H_Rop%e|lYV zM)qGB@qf%G=6=;ns%mJTdV-gZ9Z)gcFl9xhaiH8;3}A$my^;jvFu`eCM)ZSWnLg#O zf(`RqSLvClr9Bh0HcQ$!QLR-DbxWiC4~X{;qCVRlgFq}TwB|QF$LGy64p)AU{Kt9Q zpC8YdK-o8bABUBdRD=X~yHbRg5$(i#cE}oH-6VTi5guaQ6nlQi%VNIY_C%05M15)h zUq@^)%}{YbGF&+pHz{qO6?E7NoscNsAQLqIq71FCZ zKc10gVnqG_!`NFzMHVdEq799^ySuwnxJ%*g?hcJ>V64vjpv+&9k0 z+56s)tXiW!a*UOk6_F7!=M14fBQ`0GpWg?mdPU&=MAjjBVIi=vu{N?aGB;utBt1xL z)}SF`7Qp?*Mqtj0GbHKDWq})R4GW6;GsCzCjm5%nb8b3d0vK$N?~lKM%ZKY^$sXj! 
zX55a7O3fX1$38Och#Ke+8T5BcR z!ws=dzv4Xxy$!_80HZ^&v$#};NOsVfG2UsG>|QlqnnG_`D=z*ci6qe_v*3>%g^coMzQ(>CieeZTlk(yLqx8i78)B z2VT3`h!^AcA7ErYEpu|2^`WL|Su@a4!=_;4Y^fc0X2+<-pS-s?>#w4qladzZc=mW% zH(4sl&C+4qC!sVWX;yEB&Mn;xNJ-1mMphOZ{@w7%Uc`139tX}1mq!718!sl5>r|sl z57TaWeqwx6FYBE+dxJ2JXdBI7eHK`B#qJRJLVZO-u~}LMA6>3POR@E% z-l%>7vMP8IhjrWPDH%z*hf?@})b5n(q3KF?$|O<~l~jV{MTqfZX()TDnHs#Ek#2pp zXu;n-$4j!ra)N`HL2poDWO5wB>Qm->J^I5mt41313abUxlMv3*Q+7N()pcXCgqW%X z-7)-==KPaI=428bBvn#5SwSh43)QUFU$=py+{Fpl;nTdp%^qf;YKagwJi??NNkXZr zlmgq}JTHG|I6#^(b-qut*ApiDujpKfMpd;?PVpMP^g6M)Vgmws!nj(#CU9RN>hepb zNUeSr;hF8q6GteZk%i#_=`4EJ5;6lH)5#2SUTPn zC(x8DxuTw1iPm-)9bdJQUm7l1@NtmJVCP&|dTp`|$x`2JA@v(Kzjd3STZ_vk~Bc;dAU~qu$N&qYU_Z5`@#yu%53vihKx8y3G$pus%;d_oCE-1b#GS zseiT%TZ#~i*EU_(NM89u?~n{;Ea02oD5H%ps1~D1?!QNaEe8c(>V!>y)qm|J_}~2> z@|Y{(fWoIZ{Taw~@b5uI3wy18pqaPR*yY$l{%jm?TtGD`7eRXN)_6Uv_FB*%Cf=Nj zhOyQ}yV%RJ_cqNoF#&LM!b<@@1j^q#{kr;kT(~^$NtZ+x_Jz!%_oZW}dC#a$x_?Rl z-qGjlo50UM*N09b_f;LiDzy~i2By~g|H~f1AR{#0I{JgvCd08 zIiikBI^m~5o;3cx0=qsq>4EZRcYa=tr*sB7XoXSW;xY`^gGd<@q#R@#BK%%MH;C5c zh33>T+AMS(mE&KP)%zMw;P#itr~a?8)PG90Is7oPv2?azRQT7on4P($-M@Kqs&Zop zy@F_8V!Q>yZvpriAwDZmv_it?CC0OI%2g_~H=1fFu!P$M1N5{{6qHW@CuhB@Kd3!W zKwNlS1W1uVA#@Cp-%DGpD!QndHqy6r+!i5=vjd*VgnYj)3o+woSN8QxCVK&oBRCfy z^oLqA$z-spC(gY7$fttIc_%6$%rYEe;V}gZDJ~f2@=d`0jM>7^Y4@;hS(6^S|NWHh zGwFQEcXYma`_$e231WbI=~1^8`CTu{J3;Osn9nu0O3Vsi@Ok$vqYjTcq z-R!@?l7NL27v~69V(Lw?9rX>J5awC32N@gn`Acko(o{C01Se*UE0h>p^M16|UxY;# zgd7CZQDI0mEAEC{Zsb`!?rJv6{*|37T9LkHG?) zUg9-I>{ydjcXmNTi(MImCZ3 ztN!1(6lK(+^Hwb^)uA8!dX`s1xynmoDRa(iDO-*+rVp94%yhk}cxPv4@)VFmAp?C> z=t|{Wps_CHPWy4s-1sOV1J@Q72JUpoHCU_k{gR(Eb-nQ)?!mKh4OH~f{56NOC>_#2 zGF@cU-B8pcBZ|MCrq*y9)LEmfG#|o>&v2sYPlFRkj%~4}oe=Hg`^}k6w z;dU>MbX`TV~{-dJ()Bbin786 zJ?xtW1D1HEAv`esIwQ`!8#7o;*fP-yY_U*D2m^M!dv_@CIfPe)v8J|aSH@#Tv)>xk zi|cVN4zG>kuv>5Rwo``3h8yc#yH`i&aM}+JIUHsMBUswdqTBN-^bIdiJw3Fl#Xu&RhT3VU^sx}n^481w;VZT( zaq_rz9Rk+=0lA-wvDf`OPC)jNI$;P+y_x2`!ee3F^e0&atzBxZ;wo{YO61I{(=?>XNi%d>Q zqnvMbcJl98-}XkKQ}xLbxYZ({H0H<@F9nl@{5JXS#w)`|UTrNuCjK+pSuv7tOf#y` z6rCRG0SzgdOLCnQRq_nB%|S=hL@ez+yD}E$8ltDQ@5JYxd3?rU+&X$fPCrV(qMTvX z!Ro9-Q}(2hF{zub1-YKFUNvb5?s8xz;Fuocl;d&_fuPoT6yW_0yyHqCMD+_LB6Et5 z=URSE5MN$XLtpjxIB@>#?60wm!iw}Ga8JKXkG8$ zVRVV>m5N0-eSi8nKM&QEnFI8s{%(B%tvtz_zKZ{uqE zKTY*7O%DUK1N6^M6V5~%3TWtDx=;ut4c7)RWMt0LATkR!=rk~YCVQYEX<|29pjWCt+rsV9^ek*J33!g1FTkF@&ahu0sJB?>j`>v~rzg<^P zm-Za`9uIYi-%^xY@%-pY77pf%kkCki8B~W$6cUB4(b{D5#?ae@^O|7TX&dG8FoLzv zt&}Y~h1F?Xl7(^6+tl*xf<4hrRV@;QGtpP`r+>gaP&caPouRK5e?eV0G(f;>ssz~| zRSY=v821r~-C#Tl;@*V`&~KTEbC8^BL7kat_Sw-j{I&6@`gMgbU9Wxr;8Fdd#4i=> z1A!C|TBq6PAeO){O~OoGzVMG&!}oqBCYFM<3Q`Z`Is0oh9EXhF6}!D@>Ur(B$l9Ih9^Y3btKNI9&kZUXCRpqGieJO z@6iqdknAf9$EX_>5B~~mQQg&9*AKs|lOMFdQ6*-Cdg+P~*uJ9q2=Xg?X8b7F{envV z(O04AZBeb?(H72DABoW~E8MQ3N#3)kQN4!P?-5WbN6U|JK<_G6V)rdjg6}GuW%E`m z(gGAI;p?ec@^qEV%D$--Ip5?<%K@rqZM=1hesdKm@pTo<~>;LrKek=H@e zkvE0XGB@PO>(3q11S^d~qA5FLH0-;yH1vNOXsY7+8Byw$h6kUkr7KKap!rLb+RvTX zKgwp=D;a-49m-`XSbo=2wB+r|n+4in&R%Cpzq8!#ZP9>VCrI-UC|iCLP_RVmR9)moKH+tFIBQ^uXa5z%t5brkuO+aRlyms1alY^srh4RcBy~oxUFtjuLbGRVGzD{`nKS)^*gG z1y;FtmtSEW0~^{aWyEs>n@~l?D)RT=JNsYVo_2g^M#^m2*}I~VAh!67?F8wbOO?{u zepe)Ix_OPVEgwdLvL*{=zo~2{=mgJ5;^V5N_UWB;^=n^Ei_>UpbO;CxOU`V(y;49F zn<=mmh8QWAwU#r3?zBmE0=5l)G749Am_jRirW=rRt@3a_KH6ScT%C~XEMr8A=RdT& zIapQ@F6J|HxONISM|zHxKTsri@;muBdr4^1<`oD2T2ID0T`K6*@(YvC$zB@z@!~nq zIMVXlGI2sLvG;U7B)#GFyhb6LwQUnEVG{Fr{dSU!+vW$$j z{M0KDpCz0u-&Yq0#jC5uOBvU#p#e5`a8OTAx=GMqVX#q^aIqhR@0`%od$m^91PL_< zx!i;Bv-&Y01^?Xs*VZyw_A{~pBkDOwrvS60!uK-WpPo@Q;;3_qs5p0{_g`8L!&v7# 
zRUS;%L|D5rA0eV0%CWbH)K$-_C%KcMOH8(xoD3JQ;`CjC3ehTyGWfVDbH_$UJlo^gv*Xy@b2e(Cy9sGe8~ z9`61j(2cxJNIFc07<)ru#w_gS9B<$VuEK*hX|M4P#b7O`@k$TXeI>K8u$q)LiC0a4?5=~gI?Jb2)Y-gpc zRkbo7b|;s(GQrq8vz13@jPTKi(#DLrmF$gq@w?ImFK+7>4Z7Y-E-nH;LMXE`SK#{E z@BPD^5vi_+mP>is18Vo`S0xtLH58g<2-zXXMT~qJq6eaeE2QP4ikfS8MzD7z%h3@; zQFgw~QE`fqv)E9#zRCK@5z+BBc)iZU-s%|rp2~5w-mZD1UPNPX3@Q@aqKZreE;I&< z-Ix93Jv>(YL4cNN#yQ45F!GtM<|ni)|MA%xe)CgomiTY zNNR5uKoIFT?O2wwmVKXNIQh!8w%lF8S^ko)%##&29(F1ckDRS0)(~AR`hdKlVHF8( zzatSW*fd&<5{vaCAW|J*gn`C@6VCL<%z=n1RkR|8EaZSE&LdGlyiq}TtB3PTJ8qzT z#npO-iDAG-<=dM&Ww+*SshqC|d-lO|=FPgl`m-iUcNdO)=uKr}4t=yo=wLU5-_a2r zl5eCw(>wl@UZ_&iO~Z;0TxzR)Zu1qER!rY70tO&rr`?nyTN`dn-`*p^AWF-eD;h(~ zoG;oK!VN``0PBzb-g}zm=^z}PAs9Ae7ognA-9N#CvUdQ|5ORbsLs4yF+`e>*fLh z8T=*hSZ7IKk=1LEgUy7^vwN-XW5hWZV0b=1T;M7^7DmaurP%O8DaO?Chw|w|;rh~a zmP2qA>@1rInq*t@B0N|fNe_*AWh^ved@FzEG(qR$^T?bVg@gF=hJpQT1?P37y|e2_ z+VqFA?!c1Q!nqQM)`o7q1v}M9Ttsl~V>iA=<-r~5>8-`oBXB_D3-IvKHS#BGKJS+m z@3JH;=X*QH@1G!FuJ?Px%??Cm;6j9%Tf=(0NDEwQc)98!_kQE)h{wO7fKVJg-E@(ZF<+?8qO<=(b0BgY=G>s?^jPN2;@Vx9QWc0-Ez-LuqNBkX)*#VFii zKOGM&hr67mM_o-&-A9QSb#WfEV{dvHL+{>7{nYEGk1mlR54nXZ^~>hoNf0UcH~(|!Am zAUBL0J@`Uu!EQ_?h@;DQxE70(uk$iaXl;5vd%`~*XS-k+ha90LUbIMTa#8M`ZrTHE zY(7{ny5~w-1&5wS%SFy%kr=tjZ(G|Fw=A1W#-$XYojSU0Vy-hLuacgu~SoFAk zOtVY!c*YW1NAHH8W21KMUiW{$?@86PI7nhYp|+tN?DvDVfxYG1(MEMicz5Pr zbesx%PiQR|EZkwAufbCi3X==)%5@RCt_93~=RSJzD6;M=gRM+PV13g3tMekHGTgmS zd0MuT7VUX@7>NpLuP@<>MJE{?z#2|_COw2iyCbd9yN)09zPFX?sW?0?-kFQEFR<%O z9VNyVh3>)%DGmojGcQv=P>b05{HIb2YXT`-1^l;f(Io#Zb@!h#n*WXo|F@LguSDyA z=qHA3Sz4zy!`jl*Yzm5v*+tTc(OE=8_hJDRP}y<%OHM8|Qs{n}F|t2buqjHh;|TmM zh%#@{kzUDtKG;Zin{+mt9M9J8?*9136x$Bt0)7W$1%rXMu2fsP?Fs=SXC5!e9H~NO z8Fdsl@EcDq4{bDEVKRgz&j=?~MTNP*2qTfkZZ|)Ir&xt}1F``>Fs&fnhmrF%ikdLI7wh94o1)(rzrN3GQA>OgfxHwjzC6%+^voDoz4kFHO zZ9Qul+(38o#DTwZi_Ht?E7_!H(mYe|c_z%tda-?AlkrVxw&FY+0?*B(yra@sEID>d z!rFh5(6sUhQjpxDikIuhRXj4K44fG{;dn$a_Vpa-*30>}9+^l!Di|Ci-okCgUkU-Y zDVIav0VhEue$UC`^#|IAQqmZ0J=f0bw7zt~Iv zIg$MTcpCp_Nj+Iv`%5Ya?W0*RF(gUmx4x}VA-tpl*o+!h5KF%-WL6nhpgr-%qCv(y zn~=|){WBv{^5k$-*WUuXsiB|i1osELTijlDUXSNf`deM!)Bco4X|rIM08-);>qrb0 z`x1geQBufFj1`!^XpcR${>KhqS9v5=Xn{20a^VJmN7h1R(oy5lKcJj$-XYP@#Fv^;VrYd3Iz`Obtr}i^bN|D&D*e{d9t~mn?m%@|ad59yG9@kHE z%HdG`a98SW^_b7uYfw~v?;UNWcN}V0_fP+LFl9)eRC?HYqhcffQ>r9`{Ks_o?GC?e z#?v69Lyr-XBYMki3I-m0(mK>TvB*rYoB&c3XZ=D-6kMR za&U}w6B)vVy~>EQ8Y_pq!7FCNII}4D^PCS_+~MaY1dm^qIw9g!0jZq8?@ZqXE)1qG z0VO(QZ3^7?8Z%B~GBHkD99143d1Ux#?$;5wIIFh9ei;zu5`{+;diZ;v5S-h^xIZEU z&>nvmGz)W$Py%=E52a#>?r{HsIuKo|k^hI9r`>-&(gObhb?|Qk3p8~xG%<8B{6ESf z|C_<}QPgGp{tv-gd9^0#1x{#2?LLbRBy!~4GD#J{y=@z>mC2fA@mj>^en5~U;2Fd> zskNOS#g;U2o!9wry}|sk!>^+YWO=|FMOjJ~k@TmDLI@obnUW-FvM|AKG_yEUtOnQv z+!=HoOW#Vs?=hq%Sk;iqcT7ed7Mm4Q+f$v!AZPmDr<;xvxFfi$7w!q|^%qzRw@o{c zL+a{%H~nR4y6{^0g&Ct;C3ktQt`syiG>V-@u8>UnUo?+0YFWWWD z=Wncc-O8`O+O`y``KFJcdIiK43y-nDaq`5bPZ~DLdtPGC=$-sX*d+QO3xSmUL*QB1 z63!DD@4GY=_8^@q=@}are!Z}l^N*d@8TRJ$`NEg1{a1X+e}V)3d#C@GjB1s}S95oD z^bb9j#8g=V5Kt+o0Hjfl6)ZtXW(8{~1*HW4AmQbb_~*lFXk=Fngl`Gaaz+8Z*~ z3vB~Msu4(%b*;vj6-`BAjY-W~GC~){7k)!m9UdbldU_YXg&CUL;wZ<$g^Ra&FpKlWh8Ki48TUPjZH2vsd4j=o0K;d($Zx_f#23Om zLFZ^FL_Hyq*@eH>kgCcxX)`&&?tHJNUgmIIqEg zATKt&!9VPPC`9XL=&sV8lIO+{{hb1wm_1kc?x89ig5GExfHI*^a%_b0*0x#^vI*Jo6`q zol@KhgFVx6GE8yX7_OUpduMho=lssfbhF6Qpt{Bi{f!0DywGE#kShrRl;>%GFVW!l z9i$eRxn`VQ&v{%#iQn-#%`;eG;qN1s;~2W`GQ+SEynxNHrKK`2&<#EUeXeq^w!RNh zg{S9-G)A1T>CzGg9K=R1AIw~|Yp;x`6^c$?B+^-kPniA(dWenU+#cPojmZU^oi)vv z(|89q{vg92?&?%nkFT8AHZ*M#>QTXg8*L?d?IqmjOVp^EpRg9G?W})AicdgQqi5935kS?O0<5EZ6>OYi5Bd#Ong%*=L~RC}9Wl=E zjR_J6aJ8j!>e#?gQNlenIXy%+5!#r|aMOl3MMde&TF(jKfu4I|$0^A9u7-_>qiL9< 
zfTu@AFT-c}!{+3M9qQ#B0(qyQ^ch|W0t{#h9?(1unhF*h$pO-Eckf2Jcm2BzMeSlD zYf~b$u%|VqE`^*-|Jm6OpvTI%dNmGGUlo_c(iV5icLgo3o_Rhno+XQzf}OL@oGZ9l6KM zu(8$7`%T95IS3E>0KV7SgF`{g)VTz{Kb}^z1%O7pEed%WWN|J$`47Rm{G8DZ4Bak9OJZ~;_26-* zo09g_jg#oAaPfXcaGH{dS3B{c!SwJbMfic!e%<(7f~7#d;?#NAwC=@agT*u2=wv$I z;@*!y`M+g)m676|d?PSfCOZ*suKJ1`d&By1FcBe7-WDa7`bW_}>K7zdEaNlAWV$Jr z1uR=cS63B=SIeHrD24duk*~grGAz3-&-v{-z4eao&k72@y_lHY0+_EGYD`C^=s+|Vley|g8 zSVb@3BO)%^DjJnKkA_rt6722GUwo-UVqx6%mfb4;iiYzTb4($gqA@;a{qy~B_$itB zF_V-~cgTG}15-F&g6w3e)k=cG%|9cezqN3e>Q)P};Hz61*AK^2{vge=lc;$YPw=8q zg2LjT_HijV$BR7y!QQfreJ*tU0G-hlt;xk3>UKi64D*z`aoHGD8if49`f|cEmO6IR zQPXc)Q%_E$3(rLC3x|n}ydEK10*hu2&Zi&5opdoh5T|c&E$KY$^GJ4O$M<_M;3LpE z>4>E5glB^x{Z<9R9M3)MwqLD(5MpFl)hUu+8gdEWI;TaCf52P{OO5cOIo3%di%yU) z_-MF}jcx{{N?#Jz1Aa9JR5ku!4_tuZZ0-%b(D-TXzN+ax2&jrN=jNUquXIG?qKVxI z{W%KyCn2Kz)R&E?b}8>3Axj3FcFTWpDEB38g=5@b&e^*s^YAfkJ2TdZgjIckD+0EN z`Z+nMdmoDC)ln0o`4J(j%%qkv)gS#~uM?%V@*Sh)XXO|}5^)erP#qp}We@zo&A?}b z@yXzhB$5QD<2gyp#=wjSo>$l&mmR00OT(?`?P|n#DmyE-88-Feys{r?ys`jilN2zO zgX8{PE%P6B&RC_&3kwvb)D%>g4D}fERRa!4UO+fpZfWB7Ipb*!%j|T0Zg&$xKAr%bPi8x*QQC#^$oos` z6I+suA6N$krX}`Nyj2sja#f@aLz>%kuC>xWgE62-3H)^SRTHQESzdLuw^UAAcdX&F z>ursQcely>PTryFUaarS#o1^ig#`7~j}H)&QhKL`?QJ`U_so z++0U<_K~hbMiyr4UlNfgH}77!hdsJtn;1@+(op>v*=-!~i3PLej;|4+WvwYn`;^z1 zwin?;Ws+Nh$y@%aV2!q5MUqlRj@sz=o`rOYDz1Y}&}a5TtO_v1os10PSd&r_F9uGdWfwM4X0y+ZJFsHBg02~-#@5|1~le2Jo#uw>rGBG2TWL=n$55Z0z;%gijD z15>qt?Urp>x805Gx>oGEVl#frnc0@?ongh8${(1`#pYT-eeoFy#{%H_Xk}dm>3j9f z#>b@JSF8yMMG!)NZu~{0RQ#_*%Kvszceea$R^q|g=Qe6L(8~1>%5t|MpA~my5H?>4 z;KncAM(r2W-T9HL{akiFFy&0qzF0sWVO07gALt!s$AjCB~y2J!+(? zdYHH3KnWjJnET4HA{tc=k}`AWbWMrbcg~T6A}*0gzcPvITgE8saR_)wgcwYyZX~+e zdN_d$hw;`i$8C7xknxVIulC8@5!?^sex6!=BqXP_Eg~lrzl}a#aRMe9b$1}@z1-)T$H#v#3vpSDNoRwr1-h-MZwj0e?lcucc zQ!mln_bG#i>^_aLoYZ6DUhXaOR&Kh2{>qOX!L}b_x3`){Sh~&M(F+w!2M@Rd&SC#? 
z{7B*OT{^$s{Pk=6=e42!zpRavtB0Ju$-hB`6F$)H%7c&3>FJr9Y@NXN<1YS}J*) zO808u2teDq`umYXcr@vBIG)aGp81>eVMO3+4gOm}5*@3+`glqDiwkRx;UVip$Nc!% z_!|^!j_Kin8UNb&QJP0^x_X93a{3heorjrkVR{Y6o%#5iCad4%cn;?s=J=Z!tKZ1D zKKtEg+6$-IM^(C?ImSBOxLLd{21dg$Gi-(CJxe6JnPa_XmJvot#-LUM*908{Mag# z#F^@io9degI%nywxaxXgM5d}^B#oA;V+D9yC8bI>P;>Ufzn`qxw|O(r^;YK zxw|a7mr9*FdhA!mMio#NQKE9w_!SckHk7-|p-)xpf~$6ve#JsJ^)z~lgB9h^sWf`Z zgWak(Z8Vz-gPG;-GU)adyZEXd6%kJ=H@+NnM)Y8dCL;9Oz-<6%DpG4!4>}kJa%)P@ z7SvmSzZ>~dQCBPA$* zMh_OKF|=%LkBM-$tb5{$CfKBS&mV0AnD%i{eL8 zA|+5$uoxmH3CL8EZ6D}+Ql=hI^m9ruz!qroxe-`LJ9zo>2AHdc0Rv-gPy9Iq?6`Ra zox+5gmI1L>LQgD)#<-XEwgpCGkK?&3EZ!6YiT@an5F{h=b5xHXGSZV>^Cqaw?{L8w zaMM*;1NwkD=t|PsfSzEmYRYRj&`%-<+yHv8OA-gf0H%OnU<5?hfPfZgZ^3O5P<@o= zh#p2TUy+`GZ=^&}Y0xI9wMfUpJ<$zO9#P!0dd|T3DXyVGC!y1zHK4hm zxS*pNdz=co-B&EZCdnKa0(hZqNF4|RHo)}AulWNQq26Nr_jeBl!4R5y z!q4$QKgq5cL2HoRLwh)(+~q+<=%;(K8~kH8gtm!6r;ywed)mSHze@GGQrkG7f03@s zzz`Y)5XMcqzcvC2ZZtsi+ypcg zr4<>3c#33SGe~FBPPoSz8VvGoT4Q>E zV3$O-i9G_)0C9uZA^kmj>o?Yqm>~hmYd(SC9-ipWo|>iS*f&C)&+weGo1B*-oX_B` z${YO`v*=IpLf<1q%eU?~>FCd1dCFR$J-=H@wys#uryEE%khkwZCW6_ZpOw(4-Lj~f z-c=MTz9|%JAH@_-jvO<=>el(RpBkthZ}dQye)6!(Hz!ba3z8havT)G8Ga-Dwm(Yyb zndQwlap3(0@GY9bgsKvtQo4&|48xZZbfQ>n>hX^$Ag$4+hH>;pm5pMWK z2827pw8#&$pC5qo&7(M9uYraX&_Rm>Uqa;!Rqz?i)-dz-0&t4f^zrGd_|KbJv%@9~wvdId4Mduq&FlNh|AL+IjF^$}9EJAI zsGKkxp-U!CB7?67>~1C&X#=>`kY z!SKuE%LIrPdrhg6Q!RPa9iUNM&NQprNqF|ELg?YC!ndQI!w$)YDO4Iwu5QpwPqB;) zk4cs8xkDhuIx5ReW z-z5X>^+kn?gx5()H8w&*A%F^KOC6j-EzMt7mbZ|dT()Cyt2lCw4P2Wqq&kHtCzW>7R>9y^GURdpQIA zjdw*k>?5MV%I8J!x1$5Q7>!;IH3`u@)joXim`)J3m=VI`WNC18F6z$(*<;03NNoj{ zsdkp-VXr`^%zJ)snt-=!L@Pl00zmN{bEU*rMajwM&fxbI3yuKXp!gzclxm6(Iar1a zxHjwIHoyPPfkU>CB9FW`w_$N+9@W^VdhN+8=v#^Q9 zHSx0u_!tN<2*1f-wi;rE>1U&S5`m_}1*FeP&rY1cU#_CVZxYexo*INFDUpaHvnw&_ z7z)w+jGS#`Bu326=PSBNDIOY^b&rSo>0)LocO)qOE8=B-48X=Zo&+o3B!R6clZuNV zCBsy{+GtuPOG`i>WJQLod?9Q86dcre>?c@48YtQNxD8zr^}S5h+n3Tje5!NyQk6+L ze}sBUzkb&l!F5sg-bv~(9Lsi-{gYQuDQ!t#@%!ri56B7KHW#=J@9%aY_fINec%Dku zP&Z8E;E2>I8ESvT2V(VW0o(cqdaK(~_~N!=Cohs~EkP)!^FC$3GL!D*vctzB-1zva zbIzzj{zts@ouK;e%{6~)L8)}_!YwFk0FWnmZWLmOGr?YNr<@t7NPuUL=HNurs9GvI zy=>^S>HH<5o6TbgvOW+%N`Hv(`o1%e#1hcOA_H?bOkn@lD_t zv98QM3r+Cud+NjnOk4WvqKvN}o&A=B~=j&3CAVOmHrqD!00J#W7r$>&`|;lzgppkt*dJ z4<_m4W!Qfb^Wn^;uA*2pfIJsfTow`3@*Z~xDLS6**)FwYsnf?Z*wHb%6w_%Z*wM_B zGba?;3CQvD5}12gHRaZ-bTyE>tJymdBL{Z+P1Y zTz8yA>Bb46oy_#4fA5Yy@6KKP9KQ`dtbRV}5Xe&SWu`Qj_RlKe2O;dfUasepe}1k5 z;pijWFvFkb>%hW5RcSiVY)4A}!ej46@c7sjRT;Xz9hngASDbEE3*+gu=hqHy;YoTY zPvio8O1vjFkM(w_;W!`x!63#6o`_rf2w#PqHoW;lT)_7U)niQ{B;~uvw@Oc+x^fRE z+rXtGc%W$<`wG>*TumB>v7DI4!_V7tQY4A=fIf2t1&*qGBc4Y*0b@JWm7BX_dJP4h zZWwBuHP!^t!CdR$rK2({JDpQ#y4=5Tcxi`x-mxykUDlzgyUp)9mxLcnTaxmC=cSPX z9=~8_wmG+wU(ly;5xl5fC+I)S^~PNfo6eU5J*jywx9t!ikWz!kHcCH01j@CIL+$0} zJ?_RZf!YcYIQ;fI_Yt?)aC8e6BuxY^p{3by_|i{FA^fjIZ)5^yL_dvZY$)1{^=n$; z!B~`Eg+p05ZS+=^EWDdhNyWD=pxkuv>mwbsM82;E>QU9y+=iBCloe4nJ)g#02cb0b z4Cf*~U^Re`fqo20%NwMZ$X${K>Wy?dtG`4s(7K+CvYEUMK+F6g5a!5}>WJTHn*LT?fM+=dF(u-fQSI>~mJ&L8G=M3XR)d$%s^ zQyp`xagcwf9kyTi`#m`CfWXOV%yckXSXWM;=>@>>vY`N=_^xal_~(K(8wBEgpOrt5 z{wdGjT(otx`hhc_^MURfwMHW)VwQ3SS)#jfwmb23t*)ia!!oc%?TVkLj<+1U6m;m1 zDs)#?NP(~??r<K5oYMyKMB9HRSiGWqga@7_)fD96JWdmum}KN#p?c3EE!$;%ibR-{y^^CW z>%;QEmWmb%21wkRTm4N5g7}bz&`6yK0~7C;cwNwM>1Em$go8H2q1bdCiXp)zWX_7+ zCZAf3c}h-fXMJlkWEL!(q6uiK38bW=h2qg!hWoAVO}Lcpg1|HLo$m{zSEqpYH>@bi z@pdCXAzhkqTh&W(qg5-HfpE~y)EO*g;>bse6o0|1o0c zeweDDQuWrr{oT$7J zT$0NK(IAWTtU-9~jcy9RO?D>O3zRd1tNfOziGB=_EHRw(~; zH6s0s_V2xHxabUJ^`>H%hht|5$-y7?K}#jMN^YbwvB#K4@udnoaq1L2EWAP>*6US? 
zIu0IC)mD`9SGy$r6()>k5D5< zf_}d|uv-+JwGBo00BT&g^@Uy{yx;zbBJ{v(dztr(|~Px_MWe^7dwrX!(BK#Bqb>O@`f*@V5k7L>GhO! z2*9W3eE5o^AXDGjn;t(d)$gQ}A*##MK44JWSVvum7?!~46^I_6HHteAoZl5|cY>s{ zB>~%$ak0{0*|kcVD@Y&onWefjc*ok}8>G(e&DF5xhG;Xr46zI_orpe8+>3*75iCc@ z>yGLlmg!39g`-(y>t4YWPLzwTAzWgjm3mh+B(a0FlQcG=zvVI1BHFf_iKX(8s_G4+ zOQ@z|!XfLz6#j!^ofcfIbz*lqO@W`mzNki!>^t=wNxKE})iG9dlt79~;J1XnaOwar zlgeyRMN&{(;T-!(e^n4SD!&eJhLzE2B&n%yj$|ZVO5lM_EE4TuD;e!LBlGx@+T7-v zaSj)w;7967H{VH2^oS!AzE1&fElK&7m2nVbLC}DE!TI&P$3eA4G;?HrVu0_mIQ0vl zV){zd2uiC_{&iO;XNybuw438(Cz2=B-*NGaQ|o$v9V%q=**z{9=nt*R7K^>Gl4F}& zF?LRg3rA5T5w%{G^m2%Fmk%Yv5*>0`&`=PW@G5ao_iU)j)^Hm z6<|N*GKQ!T#YS^GJrZ`ulYMYv@dP7`D_5U7zzEqN)V%F-_5RI0Ha9KGN)04Hq+rb- ziIs|7MdLQmj8Y=jZJ-5C^d1Dw$+nO{9oTxQ^~@dXOHn#?65%>e2h9`%+Mo|lA#@KP zIoKeNFME)bek7JGR^(jm#r~ESq}ci`#Q6fP>*}IQ5LLXZ8;D=+^0Um2`Sxdc=%&P3~dwBF{|Yn}d+I*am^<8^%7dU&t%RpS-% zT6*^i?<>f2|C)ODT7A3pl6hy&nB#gb`_|lCbshJB0$9ph|10Vn(o!}c;2WT-zP11L zNt>>_1iI!A0O{sB%$go5=PnYUss1d~3ZwnxJ;EaYTeq^7L4hYALHK`7B@r8nVFXhuu=ilAfCfg3`tQz9xO-} zHk`LWOp!dHQGZaF-?UTyAZkE9YycT`P6$DGzo_AaC~9v+qnx>elz@0LzhYO8tV~BQ zE*!4>oLxLSmYGc3GLqXel-C+CWQLkw^r5d-e})W(%*tZH2aajm^}kyu;7z<;`_{*1 z84jU+etwz3y5Q0GrQb3=j)x!kUOCvR08LiNeJ+fkKDJX~FJs=~j0d_y-mktUFI1_T z`WUM~%D?(AWKgP_op^S;y>wO^<01MNotgQlR|gq%7mc7DXv9{&Vo|EMw{Ik4^-sb} zDo4nN@H;y|-r;7LTKq;;8c8aZ4cjwX+O2|;oS4&kjAas(WCn~#o7gp7f9*P`YqO*G zgP;0s6B~>IR%{b9 z%8j^5ENyVfB3AKaBtrG+<*cxt&WDvy;Ov7p1(cqoRylh0#*)LX<{X0EF8t2X(^iu$ z=GJz$Xf5mI0^Wb8DCJ^TL~oBV-PY=G#2@S2Hg<6^{2#duIJgI_%s2=6@=`%XGG(He zkS82CE9J`eE?rb+b2!zS>S~9+ep&*;ZuQMYKR_EdIGo}V92I65~UC4%tRQ2z;uXomq+n0zyv z^w=WO^qF>H6J8mx5<|n`!Nv}^;-Xx60b|J#Hk2)CN!(nAf)N@>DQ)T}RhwvZZdAA4 zj>o_JgBJ3Pg7_q&xys_Uk%DT%ZIzH7%~=M$DzeLjfok$u9(AWF^3FcUq4YFrUDm^& z<j2tScRR@434VhT)4o8#)$qz;|MiZIkb`O!N%ZzB2O zA}^i2g(g9iky{k4QyhVG7Q31|AVFpBdB~dO4aw4n6H}6=-^LXsbT66ZMUV7@8Va{{ z0I3~{xWvT4-7M-a6e5B4yoe~$Uw+k=?G)-rCUJj^TG2@7&{bUotU|;sxDdwz8i?ko z4*-rl?!5H|EEY%mG@3Om~{ErgyLH3$Zy8zKS^9#=XcRlRh2_ygbId zJkHlM9)jo|&yIS(vLFk0xzd184VK!$B=!u-f+Xn{bR*^;7}^kMLObEFLMnIZ+c0#5 z!+ZVT?>QFe6Qu9D1mR_n3FnJxI@r5}5S@|QWXlN(kpz}eKL~TOmbu^r zJC68Z7oyWUlUWgh9$W{Je(zxoG76Uo3zj^qf(~ zGHC-6c}dX$?NEp!ULNae<1fS<2VJNPG5P7*JO1|B)rRzaWFiXt4B@xx!0If<&(N1{ z3?xfSRrLBRwIjs}JI#D@R9oHQ(kQ-vk^^lxJnl0tlH?pb4$RKM} zYV)O>=_QMb{F%VhVENNgDjEk9{m}O2YQtK4hF)rO&>U9m)^(~Ve`PN9fLS63b)w`I zqF-!OQ6Jt$7@VFlLT0U&5JG6r#NoHc$VC1Ibq?C4QnM~xP1M>ui#~67qhk3dbd^Kr zG#p(8!EUZQIBm5lWeT4-3!+$l-g{)0XHOf!ZSMVg7~YrK+EdutQ=dys9+YFj6sc!# z8$`AP!cZp81dUW7r1?eW}wqjTDlqZL&o`YCWt&&(n#wX(D25frpgYj^`=GsVM-Ce08mO zXd(``FwJ3pqY2-QF6CiJAWMrFXa;5;JSmqZ1d9D(l7=|h0dmfMqP&8GMpoIW2+FA$Pb>S zR@S7;!eUftqP<~?C`Y6z$%sjA9XTC^GCwmSTmhmuk#hR9VyFUp$)MCbnNQgX=uuCp zPuB@CQdg-DI7sDhE%Mk2QaIsU2lDw!cq#G{*gU}FLV3||V;Z=IJvH7^2hoyjt@m6I zp&d?vVuISp;zGR{-u+kYhhq4^dIXi6X;h{)>#2uab3c<2LAO;8-hfS(w%NL2*kM{c zNbCtFxHCAmMJU&YBP`GeBPE6}K$Sj+D0V7N^nirm0TQi;+dG4I$N=_8LeB1_Dm6Q3!`88pwsFA-x}ND%+v#K zQPQ~2W)Ke-4y~5adNp40(QgBKuCzdvst$kB7sS-kJW!Z}l;szpyH(m)R|0|UD{^r` zQ@>cNj#*O(L}dIB}G!ljw$pJfTD>Er3hOoPMyiXi)7cNw@(2Ggm?qY1rAt9W2*njx=Lwx@T4 zOPfD&21s@dw%LNjFws16R#Dm9G73bMSPvWK&D@S;Y4Qan9TQ$F%2Z%GY)71P^Kt*#)W+0bI^%? 
zM@f%jB+Dc~nw*9%zm%3aX30bP?X!wouQ~sD<4S@4HovdOH4wZpd$d(J6m z#e?gRKvr~@W-5Tji{)Y8Q4g%vkG=~5RU26Gm5XffNuuW$%6+h0q%18jgsTa@@Y=o9Y>e(7nUE zOL&j8Xlv&~{FUJk547_-lswZ#Mcs{)4+F1>rsq40^jjaCBf^PKr!m1R)dQDSJ{(?j zh!^s?%-je8^~g;xjH$f~yUNu;f*yd@Hz_XQY+(GR&t@;etiajG7x%WGEFz2Hk?ldp z_^|egQ=3%MMgHg~NcXvp-Bkd%#};*e+eIGUCbJ7wvj#!^(-vst_@8kjUxE6(21!3G zH8k`zY5fkhpCwz9ElrY-4&UQu=@cbBuPBP;@P~vB*>+e-qr56@v12wd?d1TSDD@k< z3aykRUy|y8wNJ~3uxi9{5rd1h`lPkL_`{_w%N9V!7njk)K)!%9Wf|ts5+ zdE^g>Y~kxu-vO0*m0(Vj@ybc^-KnT+N(_rh$`kc_xNW)(WqiMjl}@BM3%BM?J9YT& z_SlQyDK&10z!zH7vF8n12Ws=HecEVl5b=k)+C*+3;_H2zxZs?G^T;H$EpHNm9;Q~I zW@^q1+x}E!fFB`w$9?LYpN`0H4`7gcRrw3qA#mpgRZN)n8MYwhgxA)_JGO65mVIB+ zy$bSP4^PI2+PP6Obx)A&Ypi|>7^{T zZq|e=m1gd7qOjV*0SeY_o6-iCD z`EYGK%TRjRY_)VY47oM)72`8Lz*q1L7{}z$=+#kAvS}(UOz+7Q62xkzAzgbK21eZY z>`x@rnHWGl!Ep*91_cC=Jw-v~uhYUobnO~;x}#3SaFVH?@efyv$C@fhIMnl}l+2(RmgJI;l@i|& zDQYYzGOsf_#Btzjgf7lXEK2Ik2U3RC&L0b?8Hgee0DcR@LZj{ISBi&i6oM^7=H3&U zhYToS&7yki6PZV~f&1L0naAB2{?(=n6{I=@=OzjjoO{zQ%a*Q=#8`uC_pjfXh zh{zn$+EJc5P_ZY%r9HkUHvr((nwu=ueVGnX&kabYNq+=1Euc;nwt6K?-CqM1oz%*3 za=`iRF|~^8_$b=^#g@R2*>qMedePHTl!{jXayNvKv@}mAiv_v7&vtld>Qg=gPN*ol z7_7QzjmX38CmkfZ9%nt%MW`jZ_Sz<(5^7NfVKIEqb4qt>f21V z4cno6NN(*AkRb*=N^kT5fdD6pwmK(gguix1j`TXXCVdJ!c9Shy+r!IT$2GXAf()@r z;j@*$j{!7Nc%ps-8}M;hvmCLE;Akg;B4KuBe;S#*OtPSBoo9lAe!6h@Bby!oNz$92 z&&V%_6wCw!8he%kn!`w_C4t||Qe~X!gy`>FLpx|kL^(I)<6K%nWvC@W+?42#3_{$z zM{g^|I)9Ye@@RWU(dg#D2JfH|D=MkeFD~XWXu=k74GxPQX3nJEsmV#(;7M=nY$1+k zrAT%n$_QVxLCzJFkUS7Mnb`qjPT>Her5@3^td-6~+Z*dOCU4!clPJSD`GiqG!>iHK zTK2p|Mb$BltGNHTQ9EIDBUbR0v+vWA9NzL>{*n%|cOtKqt*={5Ky|?_P_dJRn9ehz zL-%Ag=zLi~3R)#DT%gmzHC6YL+ zfo<&9Z@Zeg6u7SJ^D9PtofzTT5?2Zs+#Gau0#Zk?{oW?Guv0=k7`cfEKzI zyh)*a1}P8V9sho;{>Eq+W;Fwbiba1*lQ=M9i(66A7?!UQ@d z&>cz_vE_&l%cV#A2Gky^om2b(VWEPB^Tp#Nd0#FvQH32UGB$hCOzn-SfYVBhu#7UP9Skd7uC z*97X@SeZcXnGc>DfiWOJ%&xMTQy9~0q2qedTHG=d%ETDrkRnvDhXFc-a#25t;r9q7 zVFob~ z+awH!Xn!BDmIPv?%YY;1J8e5m#y$T?8-g9#js+#avL*bj7Zaje>^~N9XgH?cr3sJp zIfrRU%X;z}c5|`s=tV@Z9KfL25rJP~@vT97%XD06RIQ8foqn~9b)=XHr$=sR7DuEA zFiTdSf{;lieu`$Hr6>y7XgBCD_)K4^6jO(+{W0W?SF1*(=S)r%R+gM%fem z0U*;qjR4)rm*A9Ax(Xdk2F;UNhjvU3)vLdw!>Z*1%=!uy21#8-yZ(hzh@>I^f;4QP z>cV*W7QFqe@1}sGlf{DQ&1=GVh8s8Ds?};00;rBh5-v#OT=^yQ{)Ogf$R~LGQ~<#h zv-1vqn$HCum{dA9$PFDzHruCkq)w)s6UaVsPY3&sN=|!#QzdKe()9IlNd5I|o>j>D zjZN^)<@1xvy9Y7ES2>K_8{6r~%Jl)8kk-A<@OP?fJiSK;;TMAXCq(ljX4ANB{Ps27 znpZ59FQo8%&3O_QwN z&M8ifXK$i}6{B=Z4y1R+@zTdq6M1A*b=TDO;fBibN|4&h0hW}mESHSAW%kYC0b^13cvpdmR zc2N##^c&lgOp{fQ>r^SPpst!L36$TM%}xjrL5N^v2u;^%dz)%SN1=n*i|CK}zDPkh zZXc{MS1V>%Gri|gi zp}s)%M#$?~(c!BHrUUHUePv!QR%TNPCBYEDYpM1}0D+P4q-g=S-24 zIlJ@&ak1Axr(4&S{w|4|DKKEy-=hpjo(_*iem~7U_d#>XG?2^@xyz8%QzizC47{|< zi`pFA<1H|aXK~CHpYzc}>_Wq6=WD&dW_n(C3_bx$Ojp<%)1R8>OO4qDj09=E0%z(t ziw7sMTQ+rN&e1rQKOz#EW`u>*E%|2cW|-pCsFsAE>1ZC%3*@~!R0U~;8}3GAt7OJg z@;&BLhCt8urK#QtRYV<&J$R3248mKmH1Hkh5hekr6#1|M^2A{_a2@awB!Q2ukVOMQO0+wUN+&MYo5`td!1@l{LwDo(^7W~;MczIhj=LAYmc<_zm>;-H27 z4#*1Iy^XKB|CIL7EUc5~&;8f7AI#wYoMQiZDUqQDHu(_|zC$-C2145+J>HVdc$0f0-=7o8+B$QctQ_ivEJxw*=1> zYoqE<(37+yG9s!Hdr=xNMc_%blOAve*Ig=r(GI{uD$PJzC;?01BuN?%{Ph;jCD<(HW~!@I}w6LUy2`M6gkaJvmd#u z5HGSVppva}SF)>CPdU$vxQLa$a2G`cttLi8SkwUG3jInZr1+4cu2LCK;|t!BArQ5JjIDeAi*#hL2yW9{G7{+Js@`kXc^6=%wO^gtm|O?yy`FF&XXMkd)Rqf&F$ z3i$-8PRTSnCMgVao^17ugB5XWnqlD5LuUz1lA`?7PSM|*y~3cnA28#MCLTUyZ;XZ4ifN+L$`GFERlq+@d?H>Vuxx0C49 z!n!*|q?_78qL-3dw8q8)wikcXFHS<6*+a02W>X8MIM-Pr#9zaM9GWjxBqY-F5)+Ir zBLoo!tV&`hK5UL{SgO?LE`z;h$wIgXo0Yz@oEj!rC=HJsJNq5pO@|tqsM=wZ)keXy zHqOa+s>C$3)i090JMa34bDUXoh&jq^#~IAd)(3FmK09r@OVQt2R_39!X;)^UwwIx( zY%F! 
zeVPn^;nTY!d3_3J@N4+WadH?k)05*sZix!2P3$?t;sg#GHp|l8!TGzjJ*i&0`Pp%i zgj$+q3RZ%a4!S?qVWl{(P2O;Ju{X7suy^eZpLLpT%$1KjF1>>}AHv<@@q7Zs|Gscz z#<}P2x3SIb9maj)ev2=Fde`^}OO(7nG~U!BJx$kW~A;{4&?!Fl6(?aU>j?G=Fqd2(UaF0GFsMf$l?f6wyK1?|ah}IBT2@8YkV^)UpaB(? z-zE8wVj}CNGWr67Icem4D@v2`g3XjZuVC?)@q+F^J;geNa)pUl^6|Douv)#qqida_ z+Tjh!AU3P^p*60cal{>;I-tgo8FFrPGEZ&%Idl~2;J7JWp2wL^GKZ84?Jb3NKW0&{ z7T1Bv>G6c(7J+!= z+01_+h#st^MXvdMA4KD?!S-B5vCrp*tjCNeZMsz2fZwN~&-0GsEcz zES;6{OByEesY4->59>r;r+kbhni5Df{#aeSKB8a}{&C5$atID9MSaE|MF=dT6Qh+L z7^4sx0#vS@Ary-zUP0;}N5}=Y9pOyf@Nd`}V=y|eq`d>E+$X*PjgSj8u>v@J*oe6^ z{(5kD@aF@SAFuSEgqvS3;%kF-u&xqf`GaE3s|6NXu?s@tBSM`c=cowMu`cgRVdvNLUi~{f zxw^3yJ<=X{@C#l5)W@+PghbbxCBjX1e30+&DbR@G{t1l%$v;5FwyUXue!p6J3|N=) zFE>ZOwUgtiosvfoLnIi%s^cny5_6FIV0H%6D!cw~xCje+?xo7t49|CsM?aCQX45?@ z#!X`jEp0uc5scrf;ALew^*ov+Omy6#!>JWXg!5}kjl}(D%cUopD@awGkcH+G2|}u= z`K<()oa}VGkldn>3`!a~jz$d&=H)YHdiIa(B?iaR{l

z=nKBpmB8y~$2f%1_lL|s%-{(l0A~lW|96g4< zM^GrAWUkM%ZHvW~XYam#C1FUAvx;u7cz6d+R3`H}<^Xm)J4>ujV&cp!Z%-Ta-S5gF zW0@*(?LJM7U{^P^HGtgMsdn8OYMUP$T5Gx?UXoDBE-p6}oG?tyTgM1y-oxk_Pj)I% zSv}f&tv{U3O>T_LIfTL(LzgP7mwy1-pGSZ>2|=eFao zmpswF2(LLjfEYV4U;7~eXHMK_{6D(h+>*$izdwokEc9Oy^?zZ`_UD$s|0e2xCYisn z^(d7wMQk+$-Y60l7~t7qKgMN%o`0~YfS;it%?<|#Su!mi7)%@tTp)&&kP+nR*K4wf zJ?b+V7yUk>YN&iF<)Zi~og|vNQS?22#p4P@K|k$M z-q@hrAUV>toj!Wp^U)WTf|K7=foNFGKkJ>)`Y)R^9a z+Q>u%1(+RzFc9`Sq4DQJn6yvdLIprw?Sw*Of?A67z}SPI)O%=(QO38c1H>?t7-IB6 zr3Cwh`;oVG5I0IN6s}lBAqiq*^lo7Rkk*|&!^NFd;7T4`h}y%Z6HY|JS9P2TG9dS~ zyxA-P2M=Ri z2*~Y2C@M~oR$Z)>#B4z(+=D{mHp=0bNMjxg^GO`>+DbvZIeR1ay6JG(HZql%ItD}= zO3^@6{cO*kP&*G)WKu3|*kj&4Pq`gzGCx-2bDS@?92zG#g~En3G?bkhO2F98BfC~^ zC=igw&}9uJRPvZgzW*6dcU${2&^|e*KG{Ek_uH`VmzW2%I@SK*rK8)`hwTUad`^q{ zm7_0NFY<2>-VM*@_|@zoW?|2bz}l2MC)lCCSx4$>gtxI0u6l!g8ua}dPp__RA$-;Z zzm#Km?(aa-P4vOnhr||uo85i@JyMHj19UieqC}i(1!(iv4VxFMUC#=2h7ELtn~kke z3E2>LjD!%;eX9{HP>##eBC1ux-v>lQhWJ0MD6P6^<;tzV*k*(LnC_T#z=r87w4 zip!-dEW_q@!rYuuv5B-zaWLFW!H)RpNs}y7{&uOuQvTq(n2&ssUAPcAr@GQz9loxE zT!mMl;=fUGb?z6KqTiqZ@-=;sWP(7lemtb6ohipREh=i0+vohV^XU1Do5RZU7rwl1 z3R;&?8k#Wh>njBf8h^0!l2(J3NB6J2CF8CP=SM{K#_khDb=X8VO}!V!6^uDh+HQde zyIG`kb4*$k}!4J;qY5`mW=OhnOuOD{w)SR%D|dX?wn0Hfp)aJZO@xl3xDEN4&V!u zTF)9Vw`bn1#d^J7^r9L2%Rk52_LHkqN}q(-`7a4E?f*-N?W~+Xy^oRg56 zQhk||n3S&FJDeMBA|)Ve?qPx#{#$Ps1GB~7r7vh112Yu^1M{0-TyUI!pg(56fgWhQ zGz`t3H{4n>&#h8F|Ip;;=id({|HTCVAAi}<+`-Vp+u?fY5L{aF~>uWY8Xtl!Ak(B|A+$r2Ph(5$K^xsjQO z+Uls|aEhQ{&B(AQLO&~_PDqKr4=nY};oI~VsAM!vK4~@~b&C}lYc|ST*mr+l{UoqNDtzy&0vb=j zQh9{Z_B-jv%nLr=^#$LLw`W*ADBFNg5JZ*Q@~z+iR?t%@T}pAPW%=4Ny$Z+)4CU=1 zYs%-sqZFdbPxk@Ts2hQvS`=ccfR>Vc$O0Va55$aIlgu0GxzRpn^(^~h-eIaC))vrpu45}n`* z`@=#qF{flMG2PVI&3$Bq$c!a2B1|UE5CV(k%UZ*6xk->3W{6VJEDr!pak4Q-paN@= zQeuj)c`ms{8!N7Q9R<*2!5C3J7Liz7uj&nxSMDBl_}~gMb6RKdMk2I%Z#c=IdKjuS zcY<#VD1zRqOq!Z5IWf(t&XMU}I5$-xK7;g;rCE=b{Av>E#UW6Q?<{mk3dkp$7nS`seD<{O#jG+Yn<~#HA1D$jiqVQVQZAGrSe<}iNgtC{Y&JyfZygXDc)yN66kldP z3F^NOsBiy8K>epEa*k${0v13B-t14ETUJ+JE|S4o>K#oCCm;bs5>K#L!)3!}Ya2Sa z*m_S7{zE2v1zA8o(`19k?Y1*!s@e5Ra|?x?uAX=cmYfNYc24 zLPo1%q4Zj@GM1_Zs*_cDHdT-SjIl`GD}7wHnRqqL*o8gS*Z7`LPhj&+VJ8hkn1~d9 zvMlZ~gDN6X>14=Ct-#c-ag2;Kr!S`rtV-9i{E`QqtvKP{kR zak6`P`j8R5WL7Pq3|~y6)M*FWSOm@f)D?Zy5>pCSoVK`N{ze%vrvvb^JPg&ZXMO4> zUyEsh^A4xNz2N#7TYNEg)33anl8zNI0)Nl=`5-H+oF91sep~0DB`jIt6q7fvOA%ZR zNz&&srD;fyf1H*Pi16Guqi?!=&R7YXnA~?}MRQJwFBUZO-j!5AoS;DnpV&tw$qp|> zn`r)>oN_6kJxZxMQ2ZUbZ(8TXI$r*bZ`9|&;`zDX2?(!b)~2iRAgC2a#zT){`GvwL zb2bg*LCty^lbq#;IQ?eO_5xKbYV#m8k!tqQkloNs7z+{BvD&a>Rwdcn1u9>`lXP>s!B5-(7T1m2LKlIcLQyLl{3^rcBtz91%@rAb^ank$tY=kQHq}q)T0GYXBNN0w z{b4J{ET67!A?}1XnR)7J!^!XK{qc2+kqw3h+pXMCWGFpajBC+(d3QRNJ+4Pa#DKZVAYiz~svsd^v=FB3;ot8_mRgjAp>M-oe10r{(t{^^hph2=#kP0+^~z8yu! 
zk7JO|y98P=>-s69iahFJ7tkRVTKu~0ya4p5gf;uUWaUP3ntkXC-HQ= z4nj?N1dkL;eRT}h1&^bSa}t)pR5mx2kkyG)E{FK%QV{eBruKHI!*bGTJpjRMKm1t_ zK_m%}ZJNMkcq!&!;keI1T%qU8QTG>shO=D1F=I58O+vBwEYI7CaaZq?!$GsKGWXoZ zKx*q?=OL1WWJS&1P&A`GhaKC<7oeruP#{&xGakJ_8*L2}**MPOdc(+?>~5|A_ngfri>2V7qpu!uy)}zAzD8f1(rWH@G#`XHxM0%WWp%{{o$V zc$fWWY+BpSep+WiEho=%iVQk!38#(fYG*x5jKqgA6Y8f9=WDz=nbS8GHx$xiyeG{W z(a>FkKNUnC5)mU#lX~!HW~UtT9qvDPeO%vT`-Qj_?sSIZf?`88q43DpR&|RDiK+@A zsRg%?^~VEH9I)shb0Vp+Q1+bHdSMBnxNHh4ckxu6l;!8cJT!*jppYDr?gzmaO#x#( zt7anpyDL*9XYs4>R)P1azq7Gref93vmR;JX8M3+~bTD*?WEi z85w=XhSW;ZxLNU<+La9fcQQA}%QSnSzAUa_eDlNB*$mgIqutdj>^Sfyck-&SnHUj# zm3#LOnXOQnbLJucl|-O6Bw2o-l__{{a%xwjv6@o5Y{#<^*=CK0Q`aQbbMg@-Z~5C^ z?k}m|IzfR=_q;BkQXsdonuYfFFFifC$?r`%J& zpC_U6_4P6D(B9j#2?Vm@*5udKxmrgk0Auz=hb5%LMLa51!B98g7Vo&GJ@oKWDF25v zVt?E&Rsv&b+((7J$3{%QFgDSg}3=CCvRCglkA4&kb4} z!V9}-@(7ZyF>wZl!L}RFBE^@0;Y5NEnKhj6Zqiz`L}RAnc*aG9vl%7Ra=BRqj^P95 z0jzuG9V(UY$qJd{n0p0;biV!`JSzEq)idn=ZTx?Nva0@JiueMS%dH}iMHHVsEx{_i z{zuQ0`&Q>RH5>LgA8JsMt6=R zR^|DFzNa)SHEB9jvpn}*FQ{|7haclW|b zhZxM{Re$F&2pw`Z+Xkq0M}1+E1kYn7m?GJ=1ep4l(c!>%dx8WtqG=Qjj3t{}-}EY% zT_2>;5~`$IqGPyzMtA1DxBSnWdr)(8byfGsw7t!m8swhz``19<^I z^*QMSh|^&fY?ykugVSZ5_!SRoEPh=I3+`QD8l`?}elSvlz1L(Av-7;P>A#rvDY+cA zJ{=lKSvI)AAm1f1NqLGgu=wCs{Ic7rgli>zg z8jL?XUT+PwiMKE3gFtec6A%c40&kGlpPLvkBb&H@-&o*zeZiN^ygx*0podkF@m{#Z zud)3y`Qy7@Nq2Rxf^X}*gp!h0-VbzrC9O-*5;>^)_xX_UJIM^t1Q|;vJ_gWeU8Ff{ z&CQp1u@8_}T56o=4o@`86Yg`Tf$ho;rR9v@WEk1%`oRgoE=5<3!%=3pL~M)Nd=7Pt zrv1A;UK}m0EjUy-UIM~x;dlpQaErs$1!=d~-c5-BO z@^6Q>G8V~OI$3@P`QsRwP?lQvJ`YLeFFEhOP+9#y9g}}VqNur>@#pUL-{SJ0!|?Al zH?km`m1?Y?S-V{q$#z}nRxkmS0tr?CLK09;vQelMZCly8jHK5i@hi~xM#(_&r2TsQ zq6e!0O~S-yhvK%w@w>E(_4kLjTl6mp%Me7A`Wk|gy+V*NB!#3#B$bkGH2PJbpOULj zb|T+m{8}Q(w3D~RYG5FNnF5YdW&)&`;tB{^3belE_z`~nfdGVo@+tL$Mg}SknW|t1 zJ_mKNlO6!NCSOaj<|&W5Hq;V8s;zSM1sO_}p_m|I6)HQ_P>!*D3v&=NJ^u%akSR;@ z#=>a2g_((f&ickaC%XdvWNVWR8pWD34&Bezykcz|%WC7fK@@9s7lDOv%?nVq`a)C?sV zt=w`d;)))C;rRFX!-V9YhDV&PBFv3I&E)+GfTjkU8)%e85lH>S#)se|- zQk_qOPp@ny^8q4u-af=~i~!CtC)RZ*jC5CFO#n0KMYWn#yxix2>D6KvqYynkVVizH zq2u9eEMSn<;OME!x~Hk}&0@CRSwV7R

dGJ0S-GlP#`^wSY}=3A)zD@5l61GgX2ho3yCDV14O z?rn$b6pntX?AP@mUis;WuXlO4r!Bf|_pB3)VT+B#EY{JuJ3xJ3e=!+lkq7Ya@CyfR z!xjfQ$8A#wWYkZf22q>kz=jsabrZIF2`dZ|PlRwB_6@G$Zw_w0#ZSxC#~Br%3X$kx z6kzngB8&J6qopy#rR(plm7^;zuhPL^HzkDxxuGh z>A#Ax35~ybCE1#|C!`F;e~Tpt0SZEh$E*wM-a-R1O9UYi$B{oB8<%opNRu$tB~tiw z2ySwjv(O6uwb&YKAvB;#-cNC{wYaFGWvP{7@oD{`d2XQPG3l8gkt!|NyEKK?dFku= zDM{xz<#L(D^KjYr@U;|S0nckQP>ZK!A&{Lnf0nqMWIIl{iNr`^Ph2prA4Ebrv7baj zI$5}yBf$-Pt8brz|?rp zkh`x0gU+BOb(;w=#pso^jRnYpSsFz9qsht4)U|MH9_ffQ?`8oAsBcj%TrfM0CLn< zBsmH<%twkRR5*$!)@RHI$|u-tr7Ngxl`E=jg)6LV#VfA4x8(f}{|=~6MOW1J;31S( zJWrJNC{NV(Kzzz4U_9k3u`|^x@{7)hRg_muR|+@yM@qMxq4!=Ke1?&bwYS)!SJY*k@9Qm zYkx-YMaKs_11U+NRj-sgTkPx8&T4ixxmVz^q?Ew3r!x}IO7_ix0l1}!wRKHT7$+hJ z%?#*dgbl0qHQUhE@>8(0boP79r0Hu;&&s1jrM~QzQ#bIU()LL@2F%V0ctiC&@L~YmVL2tCu05gcXt4g?mZq zWr&TgASW$0=|-g-&^FRdVeJMS?w&0L3!(iZ?ZtTPV@vAzOw8U|ZJ|XDM#-|ggM*Zt zX1=Ga2^7oVd(%;9HU=LH3$V@$Sj3ZHlysOEXOZuZc!25XP*hx6+Qr#hQL^S_azg|a zAF^^8^Us|*>%;+7BNF_;9JQdvt)^P*FaR-XUHKe@LnSIKOFUy-j>Og=;-L9li4oQ& z4;rjRqg|@kcOzf(;|k_bETy#=6axXta)$HCQ97G>OT?DY!7JmRRbk4{(phN0vJ%@c&&iHlB0cd1c=gGtbQ+-O-2Q|Dm&8Cv_FN!qR1Q zd&C~MCt{{G(lY5!2sB*seC5D)LJ0I|FWS!_ms{aVYb};L%y(o_V14dnYYlg5et1Tt zE2}q)MV!y>)WCZB0~SjGayV)xT3!CTwcSt^XrM=%gc=_$%8m_%GBTWFL5EI^I#5#cHxNia8Tauk%Nb3Iw741CqRJG%$T zvI;ttruKTfFu$lJTB`JI8Ogpd@-Q}6A^H9=OKLmgigZXj28cs~P~|QcZL!!mNz9uS zVr;45SdKU?b)!Vt-B-^alq|76F3U(*4Q#wCbrkhw#$8HQj6!%AK*(U=f;4(a_t?(S zH~zs6m56NyB%UK$2^|GOw@GyGT1V&TiwHn+4jn^PWy+Q?z(1<(hsPh{!1@f78Dt`T% zptEE#H-SPV{(VAXTWre3E6?18HOw4{sf`?AuH#}LEsIU6B|D+4afFp)o;vIZJ|s7@ zqcu#964IA5oCqsqDIc`MCBk<-eIk4zOYOS1#YWpiYYb%CU$#X91qUT2BY9QS<2<-)1Id0GQ!ZZ$uPg_9GL5?C@}sD5|vcdbp` z&SZ-{A3n-;v`HI=G)sYyEwb{LiBg?kAfw~z=sEc&4b#=F!J_3H*!q1ecW(OiHrXYo zh+O8kQ_SPE!DY-Dm<6=KrTx+B-vz#_nbW7+lmNQvY4T}$QSI^< ziF`RE_~7B5;l+!LH*_=8CTp>5G9$I$(`HQ&Qeq+CC%pE~fwK;)vA1q6C}p+BeUY*1ebGgK=B5izK+nT((G9MGjKD%7ud@uu_(VU`FCp z!Ci=&8>F4f6zSQ{_Es(eUCC3r@IZkhB$FG+7TzyJR9pjEXXf_c=ULR$6l<$HlB@I<(@M`LV=;9Smx2A%E$V01voEnNDQCA;4d1%*D|9)7kG7Xe*< zvd-Re45!d7i8=8b>wc(>adLghxGARQeQlhIE9S~dj!Y1$jK#y(793uyPQFZ`k)1OM zO0CFG$vVp3?>#K-q63G{H&585-5KhU&9xx51z?x4L^bmLzjUj~@Pb=(PsG^V?ecO2 zY5X0i=isjW`$g|w>T!ogf7hjVT!Lv2O74xRKxSrk&-(F)P|7ZcEizfAuGhCy`Y((G zY^$xmp#Z6F#n$T1-)*1KRD+JY?!YCcI1p8EqJduiv?n(S<;4`GoT5(>dF-K!Zj!S* z8duhVQ(bE!NkL~S5-Dv3m+G60=w)>ctZyXbvo{GIFmqzCD^ z(L1@DX>L9sHPYDem-1Y@W_FEOt}P_>X7ULe-~BgTv44~d|5S{VF#c_2Axp_xZtk;3d!a{tItqr2DDVw4p?Xdw zG>kDwWwz-TFGwN1xm~II=_{Y5I|?Z{l?Wb}CK0^z2%Swvk$a3j!yZ3geZ_7Rpxg zV%(rL9=ObCu_nWd{x;qXRBOa1l@M{ulvu$HxOO`BiuDAKt*eQrGj^C(J|v!iWYNIR z(AqNEZRp>L`d%4cFt@ByH%)7Buf^n6KMj8;kM&79Spu|Wp4vA!ZLY4mE!;rpqus!r z`HzCwd`XEEC->`28c9_|SU_DdP;TaTZ^_M@mrA%*!i@~tzOw_$x)wIAaqP7eH+;q` zUN_rU9v`QV;@J@QON0sR1L!Vp=Bj zC5uH;9NB@(BJ!(kM7x61II68A zaFEt$z2UTdvi2;Qu}RXw`ecOlo`|W|!`kk#_vDMFb~GYoO=`IR>sp;5VWsZ^{*SPCxpQw_|2#s%zih#A|NlMYzX$!N=i`?hW|? z1cm03ZlQY6X2(ML*5R}bOb%xlJ#Opzy;O) zbqFi_ePFpZ;tRjW?_$a7eG8-HLS5kI* zcRs0;Ww6nvT)Xw&8?3*F*k5Phm5fFQt2p$=(G7v!Y~rmK1i_?Z;#?iq29(=r+cX6o zn${bUvD4BV9VXAnTR2meZF9Z`$hdoZLi)I;#j0}k)#I2qGh^zGb!%bA@b;)??50t! 
zU+t)l7C*N2?i&aWnj|i!Lx&IT&{AsmF%E@JsE1%6t17Awx(%@;bh6T2BnL9#zeObI z5SSOOAldJ$k>T*E0wIvPLCFS>l`qoJbGz~Icf`;;-}B8ebpW3Qx)civ2yOg!S(P-F z?mL~K<)zKEzCi*VYn72AsVgAv<;;&UqUfwzbAf$W)fht1DW12XLrzKZV3J4~IIhgL zH>Wn%9C?L>S}Y1)B2@HX+vW8r3ut_w@nWHrcaOi?)g4&SUq?)453~T48aW>;Af&-abP$_E@)ntd*cnaXGQ2)EatVbV~E^DAbb; zI&Fk9jbn*zuR(7}*N%GT6K^uoq~0Gzde3(GshW7qg8V7$*}orq#WjUtKm#8*yn`Ly zHM!41zuH*2sUg@ZKbAjK89UlbdZ&=wIA}b<)SDoy_VYkv1@_`jrQGx-mRxz?qG?Ww zr)`&SNSu7x%-uum&W%;j4xRb-Ordh< zOOqbL#JDm%V1!pT+8?FJI(cmO1ZuWrRN@(H=mLCNtI6QT0>PsgD0E`i)oe zT$i-b4jxqqkx%)Bp0jH|d=0rKgso5mhHm~b-kVKzdtj8(vZjBJ7W{{`Mf&He7QQ-P z@F0{VW3)jU)+tD+rKyBiSpQ`+>|73aJelKGI zKAXa_f2lIX{$ITA|0jF?)3_yAUPcO-5y5+9(PmxsB@eZ7>t~h0*@%!xYyiQ0hjM#~ zr8cp-V%*Y|Bp*ZdHBZG1fn`M z=^v>X1=_vqdRm|` z{O%Y*?HXJm&$2M5>KaF{sq4num#LWVU$PDQ25GU!*znZV>*edN`g>bJF9>2ZfZ1Wj zv0IMykf7m;Ymd5_zSEBXRPVtBe$;-Vz)*KU>~>V>*GVCj*J{S`%;gQq zxmvNcBu1>Bt23s>Nd_P31aYGk{xKMO*(!?)S*bxxejB7Ny?U4?S-|9vuNl>i?quEL z>txzRR%t{992z)!&)Bd>r4YFJUqPR_lcSFw#qO{i?tp5-`QtY6w?;MbCvi@9sbH&p zhgcI1C}?q~Qe+RvDbD+F^$r?hLu8Qj-JJF=ufP96^`@9R32#4;H!8{WH4$ zDgKj`wUp-M5#HenWh4T%L0Bj#&xnrUJDQAN0MU7 z8Piak_2Pme*+4ib2|#o}^&Y7b6o?6cXrjP)E3oV)6TB%jY&C@|z(C%78z9-CDz=jw zU@@S=GmsyExu|dp3JTYNDNP;HL8XLYb{asg&=Oslw(g@mskzZwgE_$TkXmu0ay2nQP@Cc+Go5fwZ(cM~*tZut*$^}WMloC& z#=0W8ufFJ<=W#KrT?UKvFqtQzt2Ssl1Dai(Sv`$7>Sr&~lSBAXEjHIepaQEtkBaq!=fw zu|N_bOA??C+@T-J&Xo;QpAuQDaUS29`3O8!YOV#GjlTNkwY0}g-fdca;GS%MCfS83 zA1ve)S@`vHK_f3tNc%++rjJA6$EY8lrs3OB zH*=)&1b>2P@f-%{x0kP)^)!@GczJ<@n6rXnM@CPf#yvWwPoR{Tcyqnr@-fp3!t8`i zH4b3U7A^bTrbK@rq~;(*^l8>Oa>ZKFbds_VdmvN%jjJ8o z;z)~x5)8c4@6X%?j?solN(e+XR^P_F=~aJQ<5sMiUEa&N<*oxYA4dO zpdZ9o6risy0x_hbIaq+vTWDKc5X-QgfvhkbYOelGPzZ||i>dQxC^HtFzG|QG5mxnf zA^@ACj4;FbpIB&f!ZMRkt?%eF4)3#Z<6_`xu<}*&6f!2|h zx&kQdWm7_Zzc`li;X*%0@f}kdoN2;h@rE4I9 zwjJ!fP$S&l7BIew+NJdNy9F76xd*RmujRX_jb+rBSqb>SISKhRB@_A=r;^o zSIqyS)Vcs@*u;{viHka~oBu>T<$FZ5h+16$RwOFbQtn2E&!VcauNcjZH4VH(U}!b= zh|#H-$Vwh!SoF1kA?~Pp;PAS}CMs@OK8@f4^@ze0{Z%fxo$| zUk1VoXQ5dvMkrt_2&J(WYLNEpZ12Mq8oqYVjK7LFyTu_sEveU?NG3dDXE>mLPmCbQXl^}BR zLYJyzUl4gv^hH?peM@mty(FlVkX*-cU=oR}Yi5^pX%rc)nniGvvQ-eA2>?%&a*=qywwA%IZ8rvbe-uZy-fT8FJiCj1x` zj4;UK&*d+_%OJskRrqew`FoOc)0o5egZKSwZJ;x_6kIZHCC4zwa4~})B05ZV1RqS> z(NHNgv=H7b2J=u$2&lBXdVgjJt2EQy;mszdiQ~Sd&)8dQa^CmwK6wz$L4_aSE=s%M zr|PUfUL7lGkafJW+OoNA{Rv~r-zE0bJy34fJpjBr1LY^TSn$P1h23^Xoelj-Sey2jKtA72|Gx5iyohUTpO6m*->Bnb>QC}a^7fHcg@aKIh zm^Rxn-4|fMw(!wxJ!mC~Di$VFf(V3Ht(rk<%5JVtwnql@wr z^RrD27rg_4aG7u&W9TgydU%{ye{1ImlOao<0X(!rU&-TldlbpMhQ z3H(Q-{0$${#%|_@wx$mHc4p>=|9JWTqx0>psAG%tY4;5bJvL;aMblU@YapVLJGQ81 zo}Nl33R5PrYPPdlXO%+egzgG%;4Aq7Oz+)u6nMSzhuOE^57ie2(@9>(%ZZMJ+ug$@ z+b<8bDFT#%t>NWRLLxE))lkeKB&rVbwN&~ty}Xd8Na5~<3e&a0GO(yT^H(S_EIfVQ zu&`EUW~0*qld&uo)&S>%Ma*U7Rjgl~rdgq}FS^t|UPHC|4XYns)mXlzwr#*mh*cTk zXWvkHD&TW14HCZ0r)*=WbZWW8 zi<)Y)akFp@QBLHB9L(G8++mW>+VV0A>92!!!D2Ad?n3W4%%6&V)V4p9b?XPt(Z9b` zsg}4kKRz`ur+J#@ip=;$1Y!QH&r?NGeK~S)PjE-$X~gZlw37gB$%5m#7E|e-f6s*S zI_@I8%3qMD*-XB9gPL^M;7oERPm??WQ*(^nzQmMY%WLU>QNo~B4 zQ%LuC!p`(}IrZ3h(B7@``g~Ghwf2;Zv<#(jN!O4Ckz7j@&L~BET`;Zz>RG}R{QpPV zS4PFzW?Kdg?gS^eOX2SB?(XgyENDS+5AJTk-GUR`-90$L2{4s>-FN1j=}zBuXR&H6 zc=`3#KF_gdpM4bjZLmFqxF2_K!DP#pKXEX;{`N|BOACt6AX`)Wqpb=2|Jd3;+O0}@ zps`R4zMX~dt=3xF+QN#+6uT6@f>Z%8e5pX`$(?4zGNRm^_meT<<{D3) zFrX!#8Q%L@F6WNx0{ZK3@bggmi-I$P+k-2y1H(pO1e2kZO@mQXs>6ddl;Ng_Xcooe zi8NQcD26c7JzRy=g3?---E8f@tHn&vdoFANm#cxK)+&Rw&rOzcEX~82E`gGEV7uZ^ zS7qy^cgqBUioLC7kUVnVt7~RV?{w>2)1#qv(P<$|6-Kgngvy5H=AA=*&F?HXQ1VVT zyg{34_m*+nHrG{qE-H}`Hqtj|Yu%EfOV|x*vSsJuKuvJ`@*gz@;y)5PtB5#t7<8{7 z0>5`Pcw>TFjb^+fU{)1X1dAgRT91&(@$V*KR$36%xb-H|@)Gp~N>a;Z5%b-LM-IaC 
zIYPn?3cCbSsdqNCb+Vv*6gxBr9-{XM^n^L+pt$uKw7$UpjhjRY39f-v=0_ z|0a~mX6{xlRu1<6P|<#N^n+Rt`2@k^9A zY)ebP9zG0S62_CvP_EkqOaZb2-jjytCC>k2ZhiJ%NfVMyCzqMaI*!ItQc@dhEHn*MGzr-)~QF}Ut2H1mKM^$fu2yOcGE33jW-bCk;P%xKu7||7U+7w-fu0315S$Z zBoRCNnJ>^+om0~&;CNH?Dl;y0U*uucmCje{Bw(FdS1Ddz`erg@Qq;y2(MRDtf{)7< zP@m>8hHFiaqH;*p=A)9b7;MJ)gj~apuSTNYOvAN*jb(AeyEkC1d$RHs_yHk|s8Mok zp02y<8z?i_>=XI5nX((4&MPpWd0{KiJ`^)BE2jyFip6g@x)mh6%kq;FyUb`YD(0fO zyo%--MO#pK(i?iWthF35X(taT_`d98zP`VxYHlcL5b7>d05}QYumeyNO)~8np{KbR z6h-RT-b<{-Fv~*v*Z64qc*DjF=rn!%qoTLe|MW!tD{1dcwK0!hw^AKGa?GiXWURBO z^??Btb~8tBF(l!%!4nOqNvI81R5K_{u*X@^DHFULcLt(bZymqMbJ*6#L41uN4aFqX zYx6kji2@CoY}Dsk;p$!Qj&|HVub+3We);g@ctd~}0uY_|e#dY0(dlc9$Il_SA$Uy; zN0DwhuqMcMN<*pcaQ{I6xi^8F?*!kYk@#?a>rW?v6m3xyy}ty8y@_^l3XCvf5O2ob zGnN%aHYV{xMHU7rsjwC`b5d{7?30b|U|`MYl9AJGMh4q((r@X>N;XGJ!H%TFUGR^~ zG)K9h@pogw(yYf<12KX)xS8H%njXgV*Lln#sxi$r>CTmq0DpttmQNCjmU@ z{1U#Rk2tzf&F?3dE^2GlDPJP=e!Y#VkScb%7UgNJcGsX?F1^jaQ4Cb$0jBCF3cCh%r>zuB1w`c{tzJ4)iZTz;Y%4hRVP2^af7jGm@T}!Fl`ZBuc z5brLrOo{P03HQ>r(wtUHLT`cnI?Y~vC}w`on|i%kFu1rlXP{SG$&T|Ek_>3N;Q#apZQM;m4iqA7{KWNDszV17>XMh@s#^&0_A zCxU%luuN#4Z@v_x!aA-fG?st8B*n*e&y&->PU<7U^n+H_6$C$atFTNyixS%C-de3r?dvFmfzDYT{&7s?!&5H7Ii{!^9S^{8D zxVZ<~DTy2~UKDFay{q{S-WGKsMf|-WFp@>`^ULo|tj|Zy`Ea1px#y27nErDI=&wrP z{|@Q@NX%5NKpQtO_`blad=t#2`BoH|W2Y7l-;D8DyHQ#v$_>_!DP~J4RJM3*G%+~a zc>A46q?42`RSB6N+oc4~Io>yti1+u-XWn-m9^*ZbA?n2y(I!i`#wdwrHLZs3O(gN4o$^L zldD5siH=5s_NbQ89~(Y`t?DGw!+JVd~`zw~zh$B<2m%U>hN*5a+B`akOANpFBF=zFI)q0I^%noa``7--lG27%|;!gRsw0(n1_|dG5ideJu9Pr2B3M-V}j_T#AeP44T$$DQxiYxCdx9sL-iOj1@y>ehD|8e~pC(YF;s zuRHhWW`_Z+6ybT3#S800>K>wnpYR!#EkthTZ|l`;nMln)^gCKx^e^L#Pu9asVqfL! zjYDXM?6ErTm~nXRqBxR{fraw}xFlgFCgnZux(#h{o{VWNQ}X)1(xh1uBp%Px;F?r~ zD~@W2xzII+4UnJ2gmlPoUZfH#=fimGqMwJIV5VK-W_kwMxA!}>O9=2!X2y_tgxr?b zdThFxksBOhwMtdmTjLm&)`jg;+Qaw;JQCOu4&4;HRjNN~=17w|ed2eT)Jc1NX!lzvg{wS{`2o?zDQ+=<0P9+F(7RO$Sv zOqhE0rI_EL=04FiD;Hv-6p}HvPlmf=(|vpO_btWc?(#4Zs6yiVVDZLELQ zu}O+zg6k1P5qq^{#l`z7LltI|Eeweo>>pA%hs)u#6CXTA5nHb1`39NX-Z(#MleM=! 
z^~39B`x+P>lQWYe6In&kAkhrd4i_?G;Bc9F3yn$YJ6R`ExZe5Yp|DW%&*G^9@M!u@ z>bRwvMDRQ4Rj)Ywka%s40|HsF2|4|5^X+1e#Y{*&Ln~%;7p2KhP=>F6@{>lcaQ4rn zoGN^Rp-+27`iVruDOMzxJqeau8ZF|u^`_DLP+Rzr1HS|7SKaYGnwhl&lmi)nYL9l z-;qp9bS1f0uwT_wMOpAm^h7Vae}m;Rk=pA!meF)qjP)j6bDF8#M1HV68j2tS8XLG{ zh>Pvd6p0}&y9>Y#WkR39=3ok<>oqLZmDz?8DN~Ig)~NmRfY`)))|O0gZ{OfETK%ZU zXQK-*q4rASWbUQ$sC=r_)tf_&9nP+iSN^@0eq2p*wPP&|9!K2nC}O5$4aF#vgZCTT zco3CqUtA<(D|$T-FInb=q5=)#a;`n!mT8q^h5+~#A8QFtaZa3DLR}-F zqJ|)@+rIcMnO?IZT=B$cDMt*`Jp3kMITz3Zoj{QC{jr>6{~y5mSKLX8``r)#Zd}?d zxXskg*0)l%0f;r)4k=(o55KXh1~N$CHGX=Ib;J0ng-M2S4e_Wnbz=Qq5poTGoy%qY z0N>MKBTN5{YE8beijcmL&{uVQBQn`62)O@If%K8cBvlb!tZ;ra->y|baq#P$QJw@CHo=bx`$ zy(Coku#%YZ+C?PCtv+gj9IsGt&5d1oiJT`@SY9Qd)|Hr^NnArU9OC!)9|2&U>0p}Yx0XmH+ zp&wU@qM~YUKF%!%=B0=O0wu&y`c+Cp!{0ijyM8KViesg}mnX!)_!@+~De|e#MzO4a zgpTLQx2~@4_dKxs)MQP8iI_xXFg}GT6^`MgoGQS|7!{r(nQ0K zh4S!(@S*%e?lyUE;LYdQ;xsqv&)Y&MP)EeQS8*!}=x2qMi)|Fq#XVgsaWpl9P8tAJ zZ6UJdAUqASgH=Ro7M?Trd~{?qg$(F;!`Z+k%(L##Zm@<_Z=Vz5`mPn?Sehu?6YV4K z5trJWwt0Rgdu#N-0zJm+9Si@0OYV&rmYWCxH2$n=1+WT$^V)Fz?75YB@jLghb zm)l6zjn0n`aE8be(u;JtxjCgdg*lbo%5f5EJ@Mh-G-&V0XH`cHg?eg(-_nHFYWMjP z=xD}O6K~~4Q)5sSz8B=ec|Y7(FPqc3XQy|Frv6jtPq%emWA8Q?Z8CgE~J8C}j$76(_WFJJ=Tyj6r0?j~&)+ zl(MI?rT0;ns*QyIJEDldr+ytm1Hj?O=x^1Dd}wlHP=3p+M`6#=JDiNzw>?7*_x>wb zJy%r@6#?1J`ha@Vwj{Q9A_oQ-Ry!%kAwM`IB7zyI_)-;c!ta9(;x?4=<$ajxD20*- z59;Aq>e;U_x_8e|&L+!0;fwj3M=P?Iw_vzr9}QoGIYzUim7o?Pxyk0LrK9PHj!spk z(~^!_uq+(SAX@g78e-X~?8|(U-cSAz@pFkD&VqE+riiLgu{@F}ggr1GMERBZ!pUwG zJI=3O%pLk#&hoVKncKX+TFMWbP$i@H_P77v+z9G^0a;kuA3MXsfAIgm+=#f9t(mZq zi<#;FX<}r*p8k69U$PV`HXs5%wjUCn>umH$a|uHI_da_ff^{<(xWGJJSpdaVe}UIhgQIIdULt+%3!;4|3p2?=M|n@!i)lH&y-el@3t(wex9u7c=?N8TrA ze#W1p{15VRABWn$50_?F*N_>|4NtOuZ)ucZ1Z`)dJ+t;mtsBoCJ(GH`q$=m-v|NM5>!Exb>>uKEkVB3t z>{9xLm)uw5wQ{J(S`CtnuDeXo;*)|a2sW_vkum}wyssrU?V7&jK)wJGNNnWpwZ4#9 z!tVCGxt1Au&8mQ|H-U!e%OF075WarnlM_BqqpeTWYS*APsi0mTIB;S99lai|wTswU z;N7zDTLM@0sb{FS8@(yzQ$~4j%Pqg;(GI{Akj)E<%Xv`MON9jJTj#<0)+b+$IYu}8 zY3pv{a9`p(sUBXFqj`#@H5GWtr9VO=UqsLL0z8)GmDp1i$JK1lSepdl~0`>QKAb%VD$II+L@Dy$T?TPp+vKFeZxS)fC^zCZL?3+gD z>*v;qU=msB3vw_dWdf8LD(SIwbON21C=6yoSgF$ym6eoKFZ?{}x$|Ruw?c3vBSznl zMwc*la_%JTwC!xp)8k%ld8j9q=499Xw^$J)J6i^mTz3WZ_qL66_bk3{@>q z(^b7J3tv#vRlbZ4_rT21xCjiy!fY=9EjRv**`Bvq8_0ptUbumuU#=(9}#m=1!%T7Hd!dLX$2%j5J6x9Hb*ldFpJ{I1F*TrHxrTIpShVmr9;P1 zwGa+DBX+p6dRe%~YwYkt-FPS9ziib@qKtvwiqxf?3_b|aK3-(NRKXyxU z;~)p~xplUD>_Zc8pZkR4 zcu^9ZX1LHZ8GUk5T|J~yGzaGX-7iOlgNGizmBF05eL*%{ecV2vGUdIQ()$dtkB*0l zVM~>W@N#HrFa1vX);bZ*uh~3%n7-YaG~q2X_B?@nR1CfhiFzFV9y(Yu9J+fv! zx{4A3NTj6Oe60C^-gloa*=)@qnBJSAaUCLbva^^ipR#UbM#IC-iRE)R63+6@zLW79 zP2`LvAV5>^VoJoYb*dd*HfLV7iB#G?MgE)(S;?5$U>s=2%4EzGH(X(5Z=bAa-ZyS4 zH{z7E>0v<@IWrQ#%26|cjfY;^9K2UkVfHC;tg~%qDUrS+GFa?G`AX|7&)va($st>U zRD#cSGxj$M&gha)ObqcoMXK$vM(9DQ;abM56SC8(rCarobFxbi;>QngakK+T9J7TkHUKt=9n|1 z>5kRVsnrdjsY;DVE(MTWjT&Uqs%rUtzZH_ApoSc-Se%u%+E+iV*O6h@2C*W)mvr#PRU7h=6zlf*P4x+?L28@qOB{T7NVD;%}$ALS}+ z=e)gLx6K>Uj2m7~jnGwNL<-+~yD_W{6GShQNcOY(Yb#7rFx$Zei z@;>bII{rMGf#7cSvVxNht+cb}Xh)1L`9=pwKnVA0l`|kr2E`RdIvkCP)yX8&8;Kj? 
zI$Kn$Pr@YW+?|W2cxqD=#qE^R;H9JAJ4943O6ETG))z;BGrwH3N#%t=u^N5uup7=b z{(!Ldyx-`g7xiRN>{@XajJpom7(CPw-uE8q`5qPG9y91Zs{KVUQPmH5VjupFr?g_~ z-i>N`g!wTq4o!WUBfr2IIotzH8`I9Z5Z^(mTsJ1YUNcwTs2`DctKTpL*X}j&R)!go zU7WO#YZkFpxPeRfVL#Cx?dEJ-+Cr)AOI~PjIW5eF>z%czeX9=nMv<$T4{!oLaF%Dx zDY9ZvDjdE3Ibrr0+RiqN-xtz%1z|_Ui<$N1=deR#f>&ZTT(PqXj4bKUUE=CDr?q=# z*Dm+;{X1XW<3os%UIWDs17PtAMNa5;ZLKvqTIYPlN7Ct(Slu0)bsr2NIYNM#zMoz=$tM<_a)$4<5hw)Sho&-G6fa;B?Tq|1fL!R3uyVxdSWfgh{@ zE)unP7zE16aT5*tMh)Lipln>=aNtbgAtBr}ExW%WNndB0nFO_?%4$t)_?Qzn$jq|O zf7V?C08?JS-VWI_g!eQ4LTAbNOP?Spo4*Bp{yjQ>;91)lxtds-{cjm?MI%>NGiQ6b zzaIZ(wo(6_qg9`;RMn?QmxqMJSVp2V_{qvj3>h_yPew}dcv`hO8h_^Q-b5wP|1m(E zd`eUx4J3D1tV6&pH4vKRGBNTa<6u4G<>$>FoJY!#Zk2zCwCc?tYM=+ zei5Q6z>Fvz8<7PXGn|4Zg~bU^h9X&sxh7f+{oOYNmt;MGeacnDX4^J7%!gV%lOgR} z7hwF2hFzB%9j{uQ0kf^ntI3HMc7xvf-a75pBW1BYN7zc|Nee+06=ur>bSJy;(_8tS zl-}5t+rlq|Orz)ZUNqZT+?&#$B#SPdVB>{W5Mv-kUaayzG|w!qnWR)bDYI_N%ZP@y zV=C5oF7;QGkFcg0;eXh!$&TIYJ5G7a_pLER+8f&y4!2$8rawX}iW3G&*dYFUM^n}z z4TuQMlF#^{V4V|lM*d_A#BczL5JoEbJcuwY?l@k#F1B$Mc_i|D=?O5kOMOQN?*;c5 zK5RUdEI-*2h-Z30?QBWPvWs|tpCEsOesxZ5tVzTpra38ZttkXno_XG6- z-m)rtb-I4{%b^8F74!$mTcigB`)BB17Db)b*M0=Dru;wFhm8N;_x+be*?;)E6~%4* z5|AJ4Ixa0PE`6?^?T#g??=H6>M`40FflJI;A3ttZALrvbCwnc{&wHDjLm2c@VkzJ6 zT@jj@x|TUHdg9Ubah?B7Mjja{qlt#NkT?@8Y&bZuAW+hfjBI9zC~1JA5vRcVG@01o z6zQh4YPq#~HtWFIh|T&A;Zu>3%hwJUpr;F_sgALGq4z-k(L6*L@v=LitaRtB*K_jx znWFq>c$FU#5HqL85a|r%dV>wBz7^=jS4Xl0E1muQq6G9ZrRDL6(1i#0RYv+3{l*mI}tuS!@i^6Z|jaWFr}H^+3q|(wqGEzCi6i zBPZqw+6SQEtZW9-BX|6}Lm>Ox|Ne$=ri$J#wO+m%S^C5_a&y+F z+m9%%cXm(;J44;BjJNgx=4pM88D9v10cv}t7f>R`V;>oDilptPV9LMp91Xd>% zv@`fBu>cC@X&c5gk{ZxN8Y_6UtWM|5s;R#->@n(7vTb*Llc{xi?s7b8oz$PEo+x>! z%po?QrDvgTKyc<}J^=z`_0>zJo!1ZzK8Pw=bC|NIc1nEo#!;+1mzL}1X*y$3efr>e z-ZQSp)i(L#WJ6{cuwg)qll3ZrQ)&=6B*%hU%wH^wHsH!LlhNn;UV=8?uS>==rsA^_ z8Nev>YrT9dgU{C}6~|@VdIDat@8|?9yhQl9Enwln?6t1LobLb?76mf56s<_rZ~}1; zNR7V0(Uu#W_6y7kLi!ECU{M)#<3TS0Ux7|E#q z`2FB{RjDz?u+-2WDZEE=%%zdr2Qd?LT9w^wd#7ENlt<<<*Y}q@6eHIYrrFzCjocC` zJPwD|t2%w6iEpsvYl+oZ5Jw!xaRSS*AmJ)`zE-Q9xQvx9atLER-?Dk&8|h?i$I*&k zo3&_kUA%B$M z;EJDqG5ij}qHbbPF_mCFedt3BXD;H!R5+ku?PKwf0@Eq@l3430D2Morgk~GIX{eh< zMjKN@09i`*Nv)69a0MM92BqXZ5~|mWc%Vt4Z!k*sq=$kjdv zG)LD*UM0SyQ~G)uXixJQ!VoNJ1QP2UxvW*XEMP{v_2@lobCP!@5Gy4mmwY%aavb-Z zA#?ItPH=Zv%*u~2Iil^^9@sJetT}bgnYWVE*k64)qy9s-&R!;BK<%;huvtXN`Kr-i z))FIVn($2GEVR+Yvb`79s@9|2O?DzyT>}pm|1>7Q5x9yCuzlSDyi>L$4hI?m^}CUS z1FxOnV9Tt8p?bg`^FzSQ;rk_m&taM96r)t~1fzMc!eW_2*vJNby>~igF9~X}sw2P> zn~b->wkSolrZgoSFbnxE_M*oJTt<>b@}UP& zgxa9(K%@#ZHB=8gPH9x4n{AEGy(C;X4>m{2uFqlv$hLiPYnGvxXnXu72E1nFG0G=j zW44>&Gl`W6fYE4e3M5wX-EH+A)6?K|7v#Pm+HI}iNZa!&{1#i0+E{|jZMVhxVTEL4 zxT3Pi(MqIYih#4V8Neql>d7)&-!B5_apVrtvA@$hKu+7be|-XEyl&JV-vxq8xK5#D zW)L5*#U+ZS*~>V^l@UCeB_?%X;E51ELD++en_?7+-iF&lQO^Dq*J0yG7&SrY<@^!7|3*e8XXN;|LztSM3PgO7;h5_ZcN?Kh=Edom?T`|naMiiYR#O=mRWtGa6x(47J)_R zO0N@HQOMuyb0P6*BNd^vh0ezJ!Tg->=_=Fr_T=Hmm+X&Vj$rN>=b}y$UyI@RRW4cr z88Hswq~Yc<6G6jcvQRp3oxI?f2jsf3j?_S+0b`h^&5U3tScgHBuSwV#k^QLs!NCzu z_(Iyk;yrtu#zWFneW;=D2H#NZcp`Gae!zThsNNr%^0_)^n?G5#!ce>)VzgAVKQUaQ zId_{>SM5@>?EMZOX11YrKhU*mvnX8dAul{H9Ji{xy1<2?2@;&tDqzdi+)3tRo{idC zndP0d&OobosFbBeEMUCAKEhXNlaf|&F6Xt!FP`I8-0;u1-fh83XY?q5Wftl zzQ21_89txwQcXKL_7haNO?Rp=;KXBzKl}di+!x`d8DQVJ;yH92r$NdL9)~l-yrvZ} zDcoTTL-tN?go@poy!S)S=d!HA^jP;pB}u^K0);7iz@l`=Rs~*@tQJY$N~2Mdm06BS zGHz83JqjD$!qV1Hr!X3{w_!hC9m5p6Y)$}?f!X?kxyrLPvgrl0QXx`SI&T$YXIckvE8HHUdPed=hBG}GOBwG6MnSGlwri%Z^Qc?LIPzoc3s zzXkA;-8x1(*M6!4PYeYQ=EHyO)T7?+9|Nq2=>DjSx}dI9O5LYyUq9uOIA^=E`jCCw z(Oaw7bd0!N%kk#!<#&x%yY5uf07|{?{>UGs`_IApZ|>m#-gky14y}ZkUC~!7`kc|% 
z?)tnLpEug>YBNS94o$ex7rz@hOTrgRM+%|g=x3$mT&80#PeU5?MmY4Nvbwc1EGdLH zgXRY&(jaX=rB**ey?iV#7UQE^KCkV* zZ#V~9!`s6#F>Pqztf(eg0R?P*WK@zCIVKE2`0sGG`==2_Iu{Q$Ax0eOCz#TVh@++{c+Di$DL*OyIOl3$7&Sj0t(e29Mci^?`QG?pvUZt)Edo)%5uw~-33(MI zh%)~`Jo@5Md;#u=7QirA)030_Qa~jGi=aId1D>7@-(qP)F%qeJw~VDf6!e%s=A8>t z>s__oEL+=h2kn)MxyC_yDGt1TVd9(%LuUqvyAK>;7KE6#j1DXH(5bay;Pm%xp?rrY zM}QBW_A?AROjrFv!h*v%({*wvVUXhewvB+~0q0}Zm8&+gExJP}r?;e$pO^K=zTjQz zKA8>@CY}`hQYsV%l}Mwb->k6-n-~c#P_QEYW3XcR&!+jWvW$=@Iix@UMsRUhPBYV# zNVo8@{zBCS?L93$-Kc4-TuSZRsyKiwjFr z;!hI1j9wU)Be~beL|Zqc)ixl0k=;ymSxE8~wNwD^GNZX5N!a`1N9P9+i>LhI=Hkb@ zZaO&pFI?-6kxr2=vW~LuL{Wqg2#g2?+tY!O0qU;(g)5@(Iw-`a9JcB8(}waV`Y?bv z+Hf;_+G}Phw6^$Rq(&{mJoAs8Fw$2NWDWG6i;jb+RI4y0B?xs4#4OK83_oqHWw%Ki z<V?g9njxWh$itV%S znuP+%L8(Q+EbA3u0&AdXQJRlF;z25LBto0ZZo2RyV(WhDL*O&L>E|mOC#_(R8@A&PWus_fSjuN!7$Z<3&8Zc8d$*Bs#2PvYFX?WTiyhda>&QND`<3sRBs;=#^@_it>u` zxh`oh6tqXM=aR!J1|TuacyEivS*FVw!P(u!0hYhYNy(ymbtzGyAuTwp5+boBZcP{v zP>-FA`ooAC2sw{k;JP0`oXx=ZgJHq$EXAweg?@0SX_Co}of^bu-gV)GeO7hG+ z-fDhg`9`#HG%=T)e53VxHT~cf1J16GU{C*ke$KR9i!yy;ga*Q8{9c%Iu@aVj(t;h0 z{;0JtqwM8!>rNJ>?tw$}Z2iwq0=PlyR_?kYx;lOkW2lTmsjXv{>LpElpYhfU=zAaV zMVK-66$v8aO^}xe)}6$?5vR6~4qn(lN%dLv2GhE(6w~CFn4(u8w{ZBhN|zv-g~mFG z)*{kD2X=G=m<5~NS3@89lZQl#=~VkDe?FrV2$tRC`WWWkDOAfe%QFS<{_eq(6yw%v zg8(f0qs#k4er|7MX9m(#H!)+BQE zp6Chpl<1VZUgbBhnG-Y8L?`@UzslO-n&Kz^aMFZLtm4M&j_QF&*ItXu5K}N= zV?o)lXr_|~CRG09v}jr!Eh*jPqqPJFzTW_|w1N6L3du%1c=|pH22-G3jS!ullb+lkx~6cBQ!-=Kd8=!))R^1r>GQ6iipa2$_HPI>;FN?b&noQi`W zy8fdtmizZ-?C%h%Ivd%$m^(Ow5)9BNENN2$((3arS~5j?^Agc*8-W3LCclU6#-s3iA4?m&4EO7~ z)EC<4_qG#jKK}kl?nTHCnJyDq-s2CM-V>`oZ%$$;-^g##Vg}IwRT0Wl)ScA&*)ZRi z0MtgyCCjPG?lt=@;I828X&o^zFq+}`l`gE_cu$e%DD>JK&QAKXfJ3vghMSSf!-c_k zO7z-ipiPqI7<8}T1|GgcnX7*50C$M#sNT&5b^z60s1z*^R_wOgEe4jrZEqwmfyP8Y ztvglYFT?$gDYv($Ihwq^d`Bema0q4yu9~Tgl(L@3Q(+KpeZfiN-ZyxJn;#3Tvm1)Q zz&JRdfmyP$*h1=FY81}Qp@gr|lp#GX(yEx$X`$xC^?mPIS<9hz>QHbh`8qbG_12Vj+5(vlBIzy%EdBLnSR{?9 zdr-_jhWcg~hcGZF@5;QPvo0WQcHuIAD8DBP&N|6S+!C^ezi6#5MJsj9lA$#cqT#nmolt~>*Zjm|SqyA2f z((nPAJauHh!}up_r8#hPI*_~va(b131b0~Mio`b_SJp?WZMQ&fg~oF>j8UsBCGgkwEp#dSf!YUYkX%7fd+?LX*35%a;&a z1P)lXt2anOJxX5l$*5mWex}o*4ZrW{%3T_y2)ZK0Kc?G?LW>^c)A4|n=nOsaCQ_en zmC%_JDf%vTbJo%0#bGQTLJ8#UMw z!RR^DOK&14jL=JXVjC}`*Ml?W>5y_8Uj=iUgII)kiNIXQ7m*-= z>{v>iT$wd@sNe)G{I!4>r*^COQJz=$X`;a^jT#yN^XM&H6M7UY?ud_7GIu@w`A0S8 zRVgLcDV`Yt)vFt`bQl0yGljA|K%9V%4?MiTV1!3QVQI)Ux3Nev;!O zz)|IzVb&6hsTnxrP;Wx@Dv+gtuK{&?j_;;yO4h}tY!gSk1Kr56v)HPq%3|5ds(Mzd z3-QZOHgSB@Bk@S>oYb4RCFZ=r8XqUrE`1T7^1(o@$qt4KKrSj^77a;{1ALco4)U_` z^b=m%pn_2zg!V~ROeMefVvQe~&^cPn3|mYM_P`Pbg_SRaNz#lxF*USc5g6OP#7qC#F}=-BmGdm~(j7pG7@eS>iQ(s~n;ptG ze4Z$R5^wmxuAu=XhN55Xpgb@lX;Ks&HNF-EQbXMZUgWC@rw6m3F+eK?#Rk`*jlXpQ zKd{cp(d!Apg+x(z!c>A~!h~oh$(89x>{DA!>?9UdUF|W$G?`S5+=&l|4R8i$wP*VBSx(3CytJIKP(sc`(~XfJ5y6K z6iY&xSAv$uBnSf^Z-fKVp@U)~f719OW-;tzFW0G0vG(+h(@l5xKCOk=NwZ0ECyK45 z?knhiqsj>WDyPw$NJQlRIWhO;LV241eTNxBRFclPqW7jOTK)K+G{Gr+i>2&sM@?)~ zd2o?iWFIyT!5^069Cn^5ixW7?OluLYB87w z$gPyE14Yp`?29$Lqit5w;QbrkBZ33kYEPEWLVWIhNDI6V7%&~pqBC^ zryJlzf&`((5fO0Vx#@LH952gf^Ys8>XSl9NT3;$z*I{AzX|)Pgg&kRT_uTfOxaoR0>PPj}50vN?2bbGv2P387uG7UlV4X7$11mb?P7Rd;1_Hz{7)K%=K`-ER;0!Rx}13@s$KnV zJ6dQS>ry!bk9(aZ@01S z(Ie5It)fbjY>&GrPW5~4ST?2xk!Ieb^7M2wiQ}->4PqO625FH1aojccSt4tdm$-U1 z$MI{P9i})u?m^|?s1c0krD3yO$vdkb- zFU=+}T(KG+pn`!K&IV&?EiD(j*w?gjl%zjBKC2bRdoOa0e7_Ar!LS+hh2jP5HOk9g z1vljFQ7_+0#vaeX=)>jlF~^&CXI3c-jxxP=h#stT*3IQcQoYU?xK)RSoL}uckHV(@x2Asz_<4xfR$%T-8-?p@bmX!-F6RF73TE&2$I#lAB%O`h=AACb4qRj^dsRt)}M7e zgZnt|#?RIRDJ<#Ya!9PG5vDd5Wi2YEa;FoI5KE8qX3h#YLa?=pq|$O!PKl~0RkBg! 
z`~qGASb^!HqP!=6{KS z4G+1bNGuh%5p;m>BkFxZKZF-e=;MjKhxpZ}7BnYi`OlX82ZrRou;hRAZB#ZCmK8y% z3lv;~%1^G)-lG8ufQ631J5nZM=xoK$iMB?JMYFW!)cy7S!LL@xI_$>G3D00p`N^3p z08Rh5TJJkSx;q;WPF>eOAD=-v3&9h!{>Wm|rT9P=0zD#Z1b8we9lWMecXbXCg&-P= zgt!nm4yMATrl5L1ylv^?*G6fv>{3AWShX_Md{w26j0^pj!qnVz&q@&>1f0juWb-x6 zTgob~Cce{w(?)#8N#61?2soG1lP?-~^o-5l@9oZ1s#K{uV=hR0m5hMQp+QQmxjWWu zlrW_!>^fl988j1}V2nR5*pU@GI{Q;9+>rhRXU0-~aF}=tBE!l;HS@Ie>2K-MiR9=x>twVRJ%z zw*QN^cMQ_COSVPVD%-Yg+qTVBwzbN(Z5yj>+qP}nb*sPb-uIpxdw=Idbex}WynmjU zbIi<{IY*A6l>Ayt+m~U8^yVXsRL!z|FdSx@caMF`_TX1iSUP{twjcsQd-4!JKhMT2 zIj^dQi>vg?71%_dU>;0_w6)Gnq#*tgv@Zrt+&Uh+Q6R=M`}TtS4>T4~sOQst$JLv^ zjjMmd2>ox+_+R~w{~GLy>{!;a{taXD{^#|{<=yTXKF_Y*XwLWW_dU7pC=`&tMIpfeG_z??v zT{rkt_L2m}MFaEPzRkNXpnkJl%ky?jq~hRJ<=B8Hu5uw*M+3Ds6=J6NkBj2a6aiF| z-W;!Osnmqzw)}qJou|szgv3&otO?3&US{~#fpCrh5U>X(ja)ctymPRL95{}KsK>vK zZKAl0a`$(~hvsj4LVu&HWvg%R@c&`XO4}HI_kV#Zd9EAOAw4vWkiW7INuEeLJgGEWjq>x|yIYC08(~I=mqnJWD)QH2V45ub3Qn}$ z>7^xb3@~#_1l`C>$e<4Jc_;u@$tB6-eZv5^{AA^jJbXq4g>iNPxsOP!R?n@ixxC&! zKA$%4JdeIIzw4FBr^2^Mi>Gr?y6jaZ5Ba_ejLT`;;vibH=fWUeGgqy?Kg{nUeTW!Y z3#NV|h|Fn)@@n}n7}Tma84^wu(kf7+M{_9T2QfUNJg)iP*f zke_|Jk#U&tsBKfbl>u;3o7J~bd}ZC~AeY>iUo*N{d?A5oP@0vt`vBU2t>K;vK!^6Y z0h-+CKyi_tW4adraBp(pJrmhL(0wf^tG~DCrTCm3P^+3irgJz&*6L z`F(95xd^Tiv8VRZIX?Zaj-SgTKB0NxT$P3(mq~52yIFx?j-NR@i}r+f+`;`hbWkEy zuR*cD;9QYq&z_5~KQLuao?$zS_qv{wWWMK-3D)!D5n)gEdv!9Epw0^DAzO^*{j*OB z45DS20U7{w)+w)jWa4d`WO#f*Vkf$BozgSxMyXFGAk zUgV?r#xHw@4GV-1_$gNFZhK`WZKHBfCoUv`XM};SXaKl@O$Jq#Is)>W(Z#=)q|^j# zn`W$8?_l&647Ok&4Kxn)hN{()uBS?;k7Nx1U4^hEGb8RG*HrZMs-P6ja_^{lQ*BxV zOjvRD-YBhtJr%Y(fD^7;8Sa|Z@uo`ztDOczl6kP4X?7V_g$IPY{cnDr!}_ zuTn>v9A&h^qLR6|>}-3TeAH;&fTD`AcGQTBb5ZNjb|E5N7awgu&)m3HP9AB|-ypY_ z((|Bwgno0r+c*7kUSvZPkGcexAKetcmWs%c7y9TNA!XunxIALzQaNdC{FY00j(WzO zm+I_=?F5nTEQzg5Tt)X9Y1kASafPNR*Qi-@Lah-_h~jBc!V39x*c7rxpu71D@|G1t z#iM|rd4UL1Zk5cEbU=z*;B43->lnFI>P}W!Ka)M0!et}+EJYYUTDTX~AoCn^h9zO3 z&u`$aOOO$21?$piU7MuS!fk!8*|ESyP^HkT$boA94| z9KyAB^cI?yk6YofxRt^5 zQqpKlYA1b`bWw<#DWvoB5F?_!@yeo}K4k**%B0>|?9%1|&1E|n*s(>F(nX9^sfyr> zR5Dy6b+PL;F~}BiIX6q~ifAXwmr4gw5{vuLz`1cAnTd#zz||OO=4&-U);6v8ot(7u zxyJ{rN(#o2wqMSy{VQ%3M0K@)c-#CVSr`YFE4KzUO(kGUsIOA?gb1q^R<>>iry8h; zrD}xK4cu9{I_(_jBq9r#GovnItb5fK=Q8wQF7hA* zgz-eF8O=85aHoz8#@QJ=*$&*L-g9^XUdxuTu3v&lZpzi|WgCi?*)+M1owd!u9b#`d z1=+staNh1r6|WHX5e~qy-Lgy|E3m9cM9oN4@Sa!tU11vxxkn0h#ZE??51y*ka^gQ8 zbCD-8F%Gnod0kR@sc6dF!Wa^H7uY?aev}ybBi1z6%t$#78JL+dMAhR@&?s)6GdxaI zktlA)XQT>rmF}C6gGmgon+@xE_wLu)n%E;KgbPd*Rn+eJlcr;kTLoroHH-q+FuCW9 z_Qk2%kzw?Ee1XHfaHv#DEhHC|U+m43GwW1a9-PRdj1AJ*4zC!P)0ID#t6osDbCTfR9 zEWvmoknVW@2*{r7)=pt%D?6+Wz;t~LN28F@?j<6qJSxrAw=J>h?)OYVNW9`Y2TbXNb&|JPfJ3?aCE1b1m2_Zty1{m2U4J;EetFB@-p#qw*P`Z+MiL9?Zm9*6 z(6&CNsClhtcgh7*1Jt5%4yERNU}xMC4X#rLt1MP~3E%qm1Fl zz|3w0*^HCdz>!i=bz%QOjS5HhgydZkZ8vNMr<*wraN>@7foXhA^ZRx9C9mMER?A6U4~ z8ti&A<-zW0uS=C6|t6|FxQo(@h zu`OY@{Ls|yZ9Pe_7?o;fP%SUANr7wO`?x9W#d=wCaK=8~2|b zqPC>E{XH$RCA(hTXk9&4ox$R3+I2B&Em<9pq^O|=q+Tm68QlVvbs6ktf0kXD)86D` z1gP1XP2@7Kiv>J;|DH-GKoo*aejp*&Ne8;(0R{MB8<%C%>PuAI3jF0K9Tr^~n%AWnMiND?;iWp&9l*sC)Ho(Afqn^rK>ncp>2flx-+gq;&)f%$L(xeC zE+|TAk-%DsXC?6c$S-#@fJJ)UmXt3Y8Sg)NLRcXQkIop1E-~0Gw$z<92Sb~@ z11b|$5gUI|V+Tc@m1fXHtT1I0|G^HE;iYH|lyU*cHo}p41K7|$FDEj#p|`eB=f%-> z`N5cO&fZHZqL~nup3C}zR9?0i$TpQ^g~g|>?_e$vn3+O5D`!O%t;iCTLXb2RSF?4b zv1K{0r`}5CCjzfD^Q#jk(~rC!XwHnhc%syF7n?maDL+z054uu_v~qWz9oo+PsH_C7 zql1cSQ>Ung*a4O=en@Y}D_0#A<4^gyjNnun@M0gvZn!kK!6;Iej7}&;QrkslEDA`vj2Zn z#r?Bv`_^$#L^MJAssutw0tPxmwz3e$6tEp=1F0MqHULxrm^E$Cj-%DLT8?bPjM>z5 z?^<5mEJ#kByzfE9wKRrMlN{=-XY@Qb_zLBDtem^M0EzDwXz0wa>AK@=;@)C6nY{XX 
ze`fT-el9}DPoxvpg1hkxK^1N%+BJgz6XL4eZ$Us4>Z;w3NwA4@lN8b<+)lU)2%jwM zLAFZ>j~o81)sIS06_$cumgqJdQ=KnNoVH!~m} z4TKKLb`n#KFhn%JmlFVjm0S&>FC!zS<{u-}D$0J6SPgTLAwo)C{lDmykEduS*n0x~sPZ7UHJNor(BPi27R$v~5*J4h5{$QAt){YqbM@Qsan z427H%+XFwc1dnY5}PY+PSOS&wW+JQ|}hU$s-pd<HhR=BFk|DMPH~-DZS?uO;}+;4o0S!*y{jtd9}`W zP@HxO=9iaQS50`@Ygy=OevYE&9;FH3ayQvyxK(nDysI9G!*V|Jl9OhaM$p1|xLs&1 zXm+u|(NOS`C)~{Ey{fW!%5KRvt}_DJ`7)y}tpQpgk)f?y2rw_dqn5gXYm&v@!;dIiEb)=>YG#LHrnIT-gju(s7TbB(R^iL}i zC#@SMs)a>i6loLeFa8S6RSXhoQPUZ=|+TX`c+6G>=R%l{p<|cpJ&6bApvPk^El{SC`?=7)k`F=sm*10?=?V+9k#&8+JX9697^`)KR@T#(GqVh4U`hwdekC4X``ikeQO0b~b z!V}E%pvCYhe)G?YMqAA9nSCg-)nVPd4zAOPRHU{v!4p|ILfcL3T)W^Zh_2+p!Q4F% z0P*Zx``}?fK@Q8u;ICj%htom#HGv%HX<6`f12)o+ zB`W1I`^pEMiA-b$n@RbjFMQ83YFy(0(#E!*?vF(Y zd0nDmTobG%WHQ4NxxqR-m=EmIAR~fs4A5ssstB^7iudprbMNPw9`3|91EKqG9SZ+O zhxcE}B>yiHF&hCKbv?nl)DB9=Id8p?;ZxJ?;71VDx$5%1aQbA7%J;8^+hQbJh zR1WxB<7U-|`;SwJ8+U-c7o%*<74h=dUsDe>q0nysAo1D$Em8J=pX&en-TEd}n|*g` z-Ts~GHC4Zvd!I65DKQZ_WMm;}D75(CP;%wexp=$O5&*2Lx?EN((Qj6mOqq@hu z#}2r!yMW6o1r-Ym#m<$V;h(XKp2okr10;LgmL06!y|-4}v+Jg|zCIs0efY2OP>2nQ z$Hl*gnDF$)zh&}hiMV2~3j_WTd2INh+(YCBZ1)8VXd(Y11LEZY%SB%jh!H9uK&*ii z2nm7{(FJ1&>N6G43i+f1XeJOMh`^(Pq{r*eiQq(o|D`p^U0^7+(-^@tXlQty8ez5$ zw~L6+lSuCmj-E|OZh(S2<{A-g=;5!)pu`mu+gAbwy=1gcKWR&{%K9Lst(=%=j+Ah+ z&RE4h^14%Da3bOx#JL5diq%n4fMs18Wi}HafLNp~H(y)EeGt~!T9o3q@ znWX~l8MrnXVZNeex^6yChDr&%QJq-ZCL*5xA`!c_(`bBRO>EZ4oIIfo-10 zR56}X*a02udm{+VMT?v`pkSv9DLvB(tV+Y^f&1W*pDr;H3{x0f=-vlDskY{~7 zi6(ew+OU(&beB=2kb1r@WPD2_xRzjjEmfGXx0sPeIlDiiU2GE;<;w4K)e zb$rKMY;MO*R4%32x5J)|n+}^#?t^x83@$Y4QJKC-J5lI4k$As@gyBF1np>Po0KQj% zd$|6+o{jAOuu0kB`GsQ8*%0_*ZV9MpjQ;*}6OrI8x3yT%oztpy&g^>Q0zjcyx`m-= zIKlvH4p*#bsJk&&WE>y4`|WeT2Q@h~`1ICEKcYhbO}LDvv|2y15Q! zMAxX3R(;%FK~mkFc?-vyhblg7Qd z3n1)L8+Y@&u)-1hv%$VCDLP8-9ua4AnnaZisw61jCEJOe8bv7N2^)6oPYGmVf3Tdg zl^WuUW-w_1h++Z%rj%aXT{&X_=v_0-m2-ZNK*V%PsEYB}FPiLQ{p#NnSoDe-&21M}POwkRRtS{?#PqQ#DnztWcY_nrc| zk5VX7LJ4oTWzc}>UgY_T@{0x5E8uH>*(8T=0E=cQ_j$To7Hbyz)SsM8L?CXDVf~eb`Z<5 z`1QGoKP{B_8(!EYxBmAj)sE`$qqIfYk=CKg|11Q-80G!1KSYNUa!Xg^Y~# zoh%*YY^;QgY>li9jjRpaq)iw_-7E`f1Fi3VJNO-y68-V9`{w0OI#R<;rJ6+zRmK^ z^D3YXy)T~Z#){N=U3MMCUy&2WUN@i=n^SxgvF&U6bIgG5{DaYnK~y>z`qW?FWjVi8 z|9(QC5E&XmdNl$olc7<>&K=1tjm*5C)i&~;;{s-6iL6R}OTNJw(sXeHAk66x-+(y< zuw!{Cid+{$pKZJ&@b6Lx75M>-!y1_dmXb#jcFkr99V;czmU9@nN}K7fcbeQp#i`=6=KE$4ZK6$kC2I|9j&b(|zXTZkAlS(_FVN02DXcaYhA{peb+h%C+vRz8@@Jy-RA$y(79x~IoW9o6k9?bdEaJR-a?kennWOvL*M7nOlT-PV#R5!IVeEt5xd{e?-%5fg!!E>73&!Xd2 zvnE7a<=g(c{|q(qaXIa)jw zPK3_eWOm;vK}jlM#8jhbtEndG@GRB?*H6;qr0>4(w4{P=JZQj+i!g+KT)$QpH6lW1&MrpmJ~)xaLj_++cBhm|dQ%<;3}GM68>y7L;iuA>B9 z+Cz9~4P<3cr(fhpxvvfrdV|MyfJ3OpkIlSLm*Z{j!)xEHuMo}W0M2JAb=0%RL$AMB zBEMTnzlOds1M#&}>KDlNOK^XrE# zDl3=PqwRp;S|2lPI8IetiUa-CZdrIBh*pMqci#z#MvB=kfib#y<^d{H6Nf)HJpvbk z_;3?rd7SNqu56FTEV?PCy?(%#f<5!`~N~)CRW_%k#bZr(B1GwSt z)l-y#1*dS~t7l@#`A`44T)+`JM!8Urv>`#W>~MXdtUwcvO)AJ}2iyY+h=LA9rlINO z#g+#nUF3Uqc%Z!_7RTE1-t{o4lYlZsTSYWMjxaHamd+?>V&~Q|s5*lh%|7lE6DY+R zF-Eho>SAem5g{m8S*mRPk3SeBqk@SA>tN{;V|&V<-UpK9P#^L8sJG$SG4mlN6*#4Q zAW(DO;|jKYhvG%MyAGzcMW&9!qKYj1=|REPCL?L2h5_*#HYq$r{ySKi53?^ACbprE z@DIr5^*&y2KO`wDHNriG>$*oE&X8r`aH{Gv!nT!wiV4_61NNF)jWlhxx2e-yMLVBN z_W-ST39a`KFLxLh?RmEDLehe$>e9+Lk{JK5B8l-|mxFI4ee1k?u6Zm= z15(pu6Bicxq&4~p<2572h~`J$-q>lsHKm*S2NAxW#K= zbZdja! 
zTEtx_cEqpZ1&CuEn?`rgG~A19bJtajUi?DxMy?toc;xJMgsLJ)oCsV38us%I8BU?4 z%}ZEA5r67(nOGQ+5ntMWVzB++B(xOi$jzk`q2azRMqWHT~Nd{nW^^H-*B z*I2b@+~=&98)6(tYRzPX5>ftW;Soe;c|uh!^CVc&Kw8lRengUG7|aEX=Y(RTSggEd zg?HaLJTa8X$woytE1Q3~<7Wbld_HLNmRsbch#cgyYWyw6qDZ}n@WGp zBg6&mHHuTaAm$bL8=rwkKeR`$w8tQ~NABp|LdnrT0}=d(G|}WYd@%m6!iVX<;3J9k zoxm=tIQI;lobF2H?cuecmqqLu!HH;ot4s6U#Asegv74oy;Qam#RdSPkZysJre*_|GJQdU)ZKL4TUx$Idrk zbpVw;uc6(mdX7btiQ@_42eB@z1)N_Zr=uS|!>vkX0t32Yr+1L(nxYI%;)$_RB@vl4 z?N0V$#bu-KF%S&ksn5TjNt^Ckj8EHVMBZEgF0|8Q_-qQGu~lAD)@v|Ku-tA&4nd)B zfZlRv)dj5UaAfc}Y7@iFWS0&kDa1)GoGaNE%nlNk7U?)iVU^^oqc{{H#%1q4v9jgQ zfsO5zf7|;~y%-pR4iyOVT}d*}arvpWB0A%Xui1u_47=e56$ zrjfV}q*zvpH1ZoN6<+fkc$y=wG;%b+P{{4rQALdu z)Uj%`08LJWgaA7g^Cj7gHMxj0$Hh1SlzZbvl^=JDyWWJTX9=0jqgKjS*4xfd9Ni**OsXX#L+WY&3)$KeiOo&Io48Iv>ODq;M_-?uRwT2@ z$3Q0zn5P{Si@NS9sZvk#YNNaDS1@nL)gYByByz>hi_I~M;{0+Wk0T1ludqX`%PBSQ z+)#28jyiJ6=zm;NuQJig-M)7^@o#rp>OZ`s3ffp3e@j8x{+rC}6jdVwNDn781ZU3& z=*=hiQyOJvwuhWa2v0aKXKgvXFLtW&aWeMS;vWO%fidKK0>8=@+BY&^o;}<_Z3FW` zr2Pf`-@=0nuEvGq?-A zZag0)t;1nu92g=hg?$Y~qst?gXY@XcHDL~h9ALKM8Ezk;)tnwa?=AX;P(ZRkF2?uilG9c9IpgOH@ zm4S-t06%8JA(1jC#G{>yLJvC{cvHEot@A_@nd~j500(=lJMwiEbCfR?AD!KmuUhUDpCnpaTm4XSin~3wFsc^i_$;6;NyV`aY~Y zqH5co227mUP1=ymwGaeXC}~tV5+RpQM8$QfFB30cjfzddkl0dVRcvb-m_&OO9rmKj zegz^@g<`@D(wO3j!=}NR)CH(QGNU*PNM-mkVj#Iwyt|>RZkg2rrh#DcO z`&NSLwpy=|Z;)@IH!~O=+`vJy+x#h*?GNyZ?YJWVjshZiEHaSd%TV=eIAw>yTE8N~ zPK(N*HwB|Afj)8!%70lt4Hny+Jpn_B~J{DQpv&w4M!#VCV7da zupBc}tP=5;TA6dwsgOBj&25WVKpVbWb;HDln)qf|2NluQUieGpNo-CL zfMD-k6f#QewUMMq8o?$w-GbinL!nohV>OM|#ywil2(LIHDJN2fs$CMA@j=O2GQq}k z;o}exRH0a>jYugqoI+dL;|kzD1tU^}#~GSa}Zg z%ncF4qZl2g9AWGo5I2;nFQ9*91WwJm$A3(71pO^!75?*ZCTHnnVrDIDX=G(&{jUaN zWSrHvdk@mkmy7cm3uJ_q4?zfQNIs8}rwI`W#9WMHvms>YAE_msGh*$v^~P`yQrv5O z{23wL=Sf&2t__eBdc@iNb@$IF7o5wBmye@0*dI@}ir)eiOUe~x&GLL8N04$kg~k-j zbHa2;%u00S`Y@)y*HDT=^r1)Q zXKhfh7R7**c?C5s&^zJJ^!~NzY#AKotZ5g?f5tCV()9w`K%f!DDUYQh^%$1XiC?1!mz%5d#)uQBBxE*90izDKbl z^a(Fsc-nms)NjzNLN;4q9sS5&h5<Pvn`an43O>;<;wIUsAdxneZ4jQXF*tPFB^2$L)R$D->>+1(fwF4}wVdnw}-E z7d-Z_nLM}IVkRl1oY7QPTm1T0@_M**6FhmnFm&N#DdO5;0`dE;-m{a zi*jdF!-oQCR3nB`Zdjv%VqsV#gK}n6!;gY#Bu^VIUAs&bPF-sn6R3%zX*5F>eo?!O zzjH^*OvP3sjxJ&kjiH@1|1Xo2{-- ze@HiMM3>y7073&YR#6?0Oy#SJ*50?x^;63_u#jssKK1dnjK2Xbt^JU&!38F^v|B{EuiU9y>{frZkvkjAYIK|crWWftLrr6G_^94 zjA1jh)&@j$RPP>5yo=EdQ|?UP(|uJeF_)))yX{vP*r-< z09|#xJkQkrT@25d{#}gEr2gLY&#?YYXz$5CQ0T~v^-Sa@^nUsWLZQ%?=vOa8=sX7^ zYseeNe5-l*M)n|qz<|u5kJtKzxk0I*bTjk=s0A{jDIxPwzX$fD1KE=JvjbOy00Tk- zhW2=XP(tLuNTO+}{IGD-2GoGmqeMaNhRM+S(I#gs*WW#;2jPDLd`;up_G8sn>*sT| zRL@zBFF&IH#y`HZR}a@;1%w|29z-659z+z)k-k}?=V?b8q7#?5WSjcg4%lV@mfRK7 zORXm#ipO^ixgAs)ST;ws$Nk2~?@6Dp=dDJ%2b8XAyZCwoB1d%y#P7v>V#Gj@gYwR^L24=!CY?k)MoXGOw z?nv^YB=8nB^6-&FzRN)t$TIJ*<@JKv%2v4{8pvQTyIu{W#Ug728YwJgWs&{$&VOjyEvAK_ zIgG6k`1_4eNf$jxVmO`mW}8a023HTiol=TMO!W&VEXb2I(T& z*r=*IOP-JzbGJEe#~O1V#92a$ml3O9`x~~Hk`wdgI?&Y6iimR9ObCMsHQ+YX>+Vsij8;l6?DLnBr2r5wOXzRr*NlC zN%tKoqCzxUVB3v)8djxpj+j@b>4rNQ&ESPUU5r>WPhB6Vmj^B6$U-+d5i?0iI*+^W zUAz6^oIQ+wnPg+aeID7dkEsc#V3p2cMYQ*5A9E^UEb)v_o;TuH8*Jo>!88xDxkSOk(Eq-%viHRJQfD_M$w#Zrf_MRbC>`R?7iLpQnIEexEe zNdOP;$a}Ag{y0P~_U(CU6CoE{CSnsI(WmEslV-(wRp>0K6Vulfe2M;!BRFsJ3_qXq z%49=;vIa`3w>rRuXFyC8Suzvz6_eI!79UDrqDgljDb`+vulOMQ)yK=})V&EV-M1;G zgpGz0$?@t(iZ0)mTOxASTDDAcBbFX36!pn_?huL%6*Uh&Aofy1qdZYq0HLJH90X8W zXMIc;mbTgh&r+(Bjggw#;|DWde&=c%z!j9a6m)E9p{6ET>P0(AAx&Ffdyi|psLgj4 zkVsK&yuvrT!nN_3l?jW$~jl?M0hZzTPjX^QCj&)=KV=|O?Xqxol1a93U{N+BQ=`{)zTm4t91;bOVBH1ja8Ds zc5k2HOwIV^G&GcG=3mH;mCemU8v}d$IlBr2tA2>uzNa+;-zAkX8@5G+B2Ag0Kl>)*^*%=+LvQ9Z2|}0fD`rK!i}VPW 
z)H?&|=S77Gw>QwYvTN+oFI+&E$k!4|Q-qGZ>bVcgk-T6OCi!o&&eS{$Sdsfy1~36; z@5bt2v}@2YTj}-c+Y7QF+l`P)^(9xw7tQ8%j0orEbdABE zDPik4h>Egpo-XBOmQBr!GHT50yQT_;3oCoK&Dp7iJ8}5ogQ8Q2J*7B{T?}>L^?24v zXh`r7+Qe8>91PhiRf44-4$D6^#a#q}{rf$r(>+Knj_!aw@gP~buz z1j%(X<#wvefd0V|rc?I7xN`EcVJd5|pnyl6bstrB9q5OW8m&COh9l9^W8m+ZVp^BtA*Rr{CH~ELp*`OB={wkb z&$nmQ=yGFUw0`n1r0nl~%Typ^GcTJ4FxJ!23a2&Pm zt9MLC9m{;j$wn@|;+}(V|L6{m@=f4mkQ2F(6lZx`lKYie24hYLZ<8EL8}_O{qF_+B zfSOQHmuvQNJf4;bl_p?$FT(kdQUm>doLvL2%nquQjX!%lUKY+ppI(!_3R-|0M?T7|(s_GxPm7lp8a$0_EQ3+2QfAq4T2U?uU{0EDs++=Qrcr=E$K<3qVC3J~2l9yMop_rt-7 z=aq{)zH}BL#1shkguJK-X8DRKZRiNukh%BVHlAL;(RWi&AQh|0fI`V!%9D}@*<86 zgfhmGLrzsY)^UFnMKLN!gI7&It?S&tpifCb8AE$%?DE6WIR3T32iM(%YYHU2oq z;gykGv3vy4-~lDSM7H|88$VXBABwbuEom+a&9bl&F&65ii`Fe1BIb|8tt|~l8l-!* zU@HIOk!BfcuVG6Tl>c?5YLJ_wvvoi`PKyqBd6%b80W!=o1BUFVQf5a$Uq?M2J*b&) ze3OoPLBJWqPEDUkDVmCl{QDteOmMfJ-YnFxVMmOmef=!W$Y3-n_win4>129!WM4ZG z43lPH6wVti<8o<+3(@Ax@wox8tk3e>^l{O3JQvv0l8fWKN^U(D2VC)?@~YbG!dNt9 zlDmfXg^y~V&NDRaRzlP>;^j1xi@fONg^-}*bbb)0Xj|(rsG~-v_)<;?5o%y|)iv&x zPe7uG8Pg^BGjrI?72^c403$tFNL;7`zQS=HJ?-9MH6)EB8fBhWjL0Zt);fE}Wp1VT z1o!NIaQp)?HaUhl4=2JxX^wMsgutvUl_cWS^&w;x#l*6gAx%F_$gt+}oZPGy?Y@;c zjzv-Nj_9<55P=yj45;0!F=q#lNQf;1ENS2Do|B(szc8r6>}Yr_UV@`{KC#SlLUdNR zz!wG2&(TD`jD;DCvEP{WtN@2_Bi+%v+{zy7a(-AmKB{V98DCt7`tYjE(NgwHtKcTM zsBd1mNIdIM`%^BYl}D7Tg42gE(T|edQ7|-#3xPN3!4-nNa(hAdIe9B*;-e*6$T#5jZPs76{hQ4o$p~#B2Y<^mlWK!`l-Q4lHnB zPAvY9cy3(N6vf1Rrd9N?H_ssX@}pU=XPgw+wp@@OMAdgw_-2L&xBhqB&>JCUz-5H>|eNPO)$e~4TGJGg;b?m^*b&T_pytEUt%&wXWiVLoeVeY#*( zTr)bZ5d&~sZCD+T=G7#qiuJ!%WP91NiuFSaTQPYY{DEb&yd&k4te*jRt%yCV`{w|I zr@9|DYzwU*e63wwShymw*$F!#qyI9 z`tsYjb{$}sqp)P(u)Ri$m=&fy4ayRIm~q4*c9SK#{J~4?>kUGD4Iver?T_dM9=@$w zzx8X~>XH=_=;P-;WAv@HE7d@6nQS`<0FX4LEiX z>3+)o$CJP-i0fvmBRwPBd3Uq{nHj*PgtaiM0q$!r)vAqOQx}2R5Nv>~SfoSMM?5e; zH$39OL0WhA^NaZjd0)eP3}S33owEb9irXr91xx&>*gwClwf=OM`U9Lqa`Hz6fZhDjo)K31#(WaxPURAfq zbWvSFh1LX%>VY}XH7@6J>1$nBk9#J^U$7npm^_aD z0`%{*BX`lqCX!S<=@5akeOee%{#sbF`g*};Z0N?jNX>x7y?|_h{|{$p8QfO0t?8JV zQD$b88Dox_F=kt4hS)MDW@ct)hL{~QGsVn~nVCH~=iDE2tERrWGk?0YB~@vcs`u(% ztJnLyB)w^uoRj{nru5!$2l;qoY(M^hp|ilunp^tXVYGi;u%d1*$twrdv;&m3Oavq3 zE@AY-KTF~LimbR?Y$5mvx9O)_Bcs`sVn=EY2ijsIIb~>vOqlT4;|Z^kqi!iW!Mlg; z-ge;Cpg)*KfZ-ohhSx(NIEHLXI0tn~6DLnr;ZYE3iVqnI58KwQo*S&zr|#w2mrya? zM9AHG)#s&6KaZ1d3>5aYM-e+Bt%qY)eWZ-+xHQh8p~L&4(etbqIUF%9iq{d6uqiD)5+?KzNC;ScuI*IVX_BTC5d4f0HgZ&=K2S zl}yPC+v)3!x{yXZV|@DayI=pQ*0<)EB)0WRq2$eOxW3fG?vv-#FJmqD?)t)lX!SoO%$^$SN_1Prwj@yDNVOC< zrjM3H@bn0lJN({at|f5@ngC<3T$-W}2#}F}_cvDC!D>u)XlxSJmR;?zkket6ge*ju zy+p=NO(@Ca=8sF&Wp!Yn6Jca5tu(eXsb0GlzVtk8qF89MdyaN$^9yyP zJS~%M`8U9mt5SB?jJ*u~aWFgQD;jlPGu@wGD5&0W`|yE?aZp^nkYkc8oMD13V$##( zAoy_aLrl#UP3&&CAV}H)ORzF_PeL!OQW*PQrJ)ipqnpapKsE>UQDxiq>7U|1p4%C= z5f<7EkM7@JIC&LLI(FpoX^jCftK0@WlXE!5qD&S=SVoH|^*@kRGTSRFjt=s7+|)vD$OxPlF|Up&0u~w3+Yyw?3lG?Js~QPhsY;Ogt6zl1d+mB&3R0 z#1o8?NZ4UMMtD_w3WmjFGg~f56HapOtvaSo$Ar0)$4tjdG{?VyofLj_`SlmEW2*K% zcbrI7H%i6CgRa;5=)|u0S!|M)*jO*dVZ0(C7(etWBtkG~D05`3Tc=M4XcCj!ziFi> zI*T6*UYssIgz`q>g@pFG*9pt*;vJ@hN#HU2@U^70;evVOs@hcUT+2|IVoe_4Y$w3T@Hdr3;$CU7*_&LSEz6#&_Bcu#SXP}`F* z_obWrnA3I1d|y~bhkl5=VA-@z!M;hk>P48~7>VWvk~#!8^-z!4&dgPVfM4m0;o#vY zXJcW31)FE~z|uhalF09a{p?`3Rc|>lNSEs2Ev?bO1*_L^N5U?{LsP4 zqg5oU^fqb0n>mp%EL`|?NAchYTL6wQ&HskHp+(msddf@!YGBBGJ8S`(^F! 
ztG@o_?}fv1##}|r0*ZFTjq2v=HWG04x#ziO<*4SjsVr#Py^psy;qNyFomzM0#|sr& zH6sb1KVPzaip0Z4K#Q-H?XAPqlmq7nR#4x{EwLDj^yUVtAedriGi+O7R;OHIB787y zn_}9hTyi07#$R4w{$k#a!F;DeAVZ)-@TW?p>S02_m7$A;R>7o_p-Y6Wph}b$D+*N# zdu$Ay3Vo~$oeF!@!pOn9g26DvzM{r3#J$47FvPhc#@NKU62fo~fBY3n8v2+QN*eZ9 z7n&3Lh=%bQ=ZXX4gMOz=;#IGoRpM2(zeeKqYrliUYjfxa#+5Ec7uJ;*#&`6qt{_Kz zK`nTW?Pamj5Q|Dj^w$AIjmu=5!QKbW(qKE}3!wL3OLB0AU)xVX<*@{tZIoah1jk04 z!JYzWZSx@h#|9i!(GkRiE%D$Qc;&`k&|~tau_{NpSLq)En!H3`w0mF#@JTGdexDwi zd)2dnIKC9!+nT(P*|oi#2HphG^}TVItc+G4RDFhkYzuJQHg#|cz`z)sw9OT~2zVc? zP1sfot^xcqef0aJfXoe>X6z8`gMV-4sBMg!UbV-jT0rszcxo23T${D+6C48&HhrY+ z!$UR%zJ_dD1nWb?BM+0YY2zc}pCVHuzvvjF3YrDAUN++FZ~JLl1`btDBY(F5$9$3l z46+d!LD8a`D%-M=Fxs*~Y}EBgJO+P?uytgcEAFO6&f$1ngq_=_gk);$K?dh+BSAI- z|Mu&XzE4~hprmh+LTl6S%7Cf*PUCkf1+veD~qL`rkRX!f0G++@>>yhC;YbtJZ@)DfhZ z21IrZgaHlw8q7N4t-AUOKpluDpbi2XvsWP!ie1=DhFvM5DK`B<8^^TB#hn?T!BFFg z$2AJLG?Hfr8sM;VK7stbJHV_1hjs)^7@bguWdrIUwipJ(#k|MCo$vi#AAg}0lppkJ zX%92z=)(>60-T$+zX~*nz^s38W@hXdJeG21t!p zxHFk{4KKBT!lShTXFfz3X5Nr^V~P0I-cVhS~)>;zTx3;5rxa~JsLYNROC&_^3ekuAiRhb7Q1-z9^0sV zdokaekU;wR_|+6t%8HNQFphUsb^YX{qB)h+97~f0WR!d~`{}a*Jx5`OQUyTf1c#NG ze=4rUMR;DcoRl2AVB#vDJ#~7Ts77mih9sD%8FP5|M{lQ?{VYCAN@;~*A!4^kA@MtN zVg#36o-mcXFqrb|(z5VOE89}`Nlnl*8N#DRHYBc9xfW7P-ft%6LYR)P-E(o5h*`mA z{M917_135C9`?q;nVELuT4DuL1uzt+ur3RNzqj)lcIh7dCxb=kaEnZf%g)qbfozpQ z{urb($J6cwi=?YRQVb|~f%@5@SZ-kOG?Lk4ni%kdc#5uy1@M|5Fd`SAbr>tk zSa>lT{=WRonhmSQQ+LVu3_nG03S0UER)g@=7e<^yV@svY8M(^-iMq-eeE;gH!yY*5 zaTeQ1G&-t<>h>p(bPm=@W+(r zUcP77?`jxiB|bR4pPN*Yk`p}|0X}$_{6XKMgYT8uh=jx()CA*T=SdXox{9SZzcB3; z^`9Q*CJ}8Ewh1$lQ-!46%ys)V>;2a5+2GgV(31$@o z)s+uYG{iWD65s6-a1ASj$`z+Y{BGpvnHb8Kl%aWBk}VBX*5(Ka#zJR&MY`y7@DEUP~O%&ZW{P3D=-kgS8Vj zu(axgdzwD4o?+o@`Jf@B#h z4}yE;-P2pNMtYxkN>B`Bg4BGd_{5Sl!x@$y2u(l^h1b6^ZF^6cAF?gO5~(0snyG)1 zhW^)&04CR#;S>m=w0>7#pH8D6CKP)&m6M1JUup;7;}fIPo#>8 z<7&`f&L<-L`u*uFG^Ka6O&g7xrIpV3j~8b>cl4TmuBYHvx^U$}&&~$mt4i7eDTi_W z!PZrXO#+6LISOb*96y;$`pX8oUKb02CLttq54@0L>F24?H;{~IoOD0U0-m%QVpzR$ z3=uS8b6-y^Zgjjk^*o2I(o_=%b0&GM2udC9Uh&?Ob#C;)E}GP^K5k7D!*AJxuwsei zmwY=<4ubRCh+y!~1e5X{^`Dm8j%<<2)phULbdm;QC@V7cl^PL>Q?V>6h98L170z>y zxxvcG=Y}_8xLUQ>buthvParpd6{mMhOcH72Nzp?aP+p!YpbyvbnNC z$O}^^lDWmC9Z${rYm%vyfE#v5ka$E)c|WJ-YCSnKRangG3msQ_YG+yov#rUs6+wk- zp~R#vRG|@GtttM)i4}CdUyIH4r-;)kEe;2{DvFI7YpgZFYlvp)IarZ@8$ToE%8?6P zk7&~8H&POcVH+Vt6^9DVJP0w=~{q&nEt83Xo#%6$3k^Coe zyH!_IM)38Inv1je;bvFEr(1l+y%U>af?W=+!w4pnN>u2@+5C9ctGtZNoM)rvTwmf6 z3jGRBt~UUy%tF^ZXq-6tWtk}g^M#hV|pJ9E&FVIPRZc!0>B)N{x|KZf71E>+T%xH!{3uubEWEON+3+l7>qpBlN*zX(B)2 zQs_--h`kTNF&3CG70mG(4^luR&E&XIowg(i>CfU)=jX{xk+4V492@4UoT*G=vZc?c z5|*-F&xm*Zz0 zl&?7+NK7jdU7*fU6n z?ujNOizv+twjtr^yiCid^~!2{#U{&bIw{(p2I!qAzZDMz%E{aU33L!~ZrgcU@}Q^s zn*tw~sj~5^D~|j^==BhHw>IEkXgpsIJL4rBkngyE40xv;RTCk_P$!MmvXHyf1O^q) z#g}>2_bsaOG;=X$wp3n%l`91IwUfxwk z){o+@+x&dN(I#TU#8>VEs>J>LSSns>$6@0W&(RpWUIF6_igAj>P5ZWUbA_V-HhrUu zjh+du`a`eixk2f-lu>=hQ(m8wo!a`u7EK0KqUeaeEB#t<1%-XcQ|0y>{JNuNFbwxV za&2y~E&>v$UPO_e!9shw^~B@%ku^6ERjUQ@>3}9BiCGO1$A*Zdm}bV8G{lgws$$16 zZe|5NWb^VO%09(TU{byCY`P#;^Z}!U88wf$a_LhRRVY90QC3u&lN(U6u7A6OB(%YA zDCuuHP1rm?q9|$&slx`1)+iabqSU7O*)FLu;!6y0$@E z`{GLvRA|y#O3Dv$2nMSDD(w|!M?IOWGd-wQ5|{|7Ut-~<(M^n1m6d0>8=B3xZ)w2O zv-c@C;c)lcq;~Kt0BSkf`vrwjD@OKRqp*p^Vfh$d^Pocb+#N_0s=DZxSRqLk2e_w+ zyIz0_sFs&f3IYkes0sVxquxAnTG$zud8aLvQ!^E>qeQ~`=);##EFQXrPSK0#Fz6Cm zir_|(o5y~k?8pD=WLvDlZrOFbbsl(!A3yWg)pv^r#q^D2wY$}3-k|Wg4P?iv_3X;X zIB9RTo=~DjZvt*Wr~*D4%xggN)^J%lk2{0k(Y>ZZ?j#~xIV8IWqS2SNV+I9FcOrBf zs91?6D~pNLeTl{Z0%SQGk#yhr)|34Cw0g5FdZdeLpED!bdxftq&9tOCth~}lHx=v@fD5g 
zM20^?)673sHR_=57qV1GEXpzi*3yx$R|T#_iu?gnXMSJ6Ze~1TzgdmUV5RM#vh)BNOA;52CLWMZh_J2tHKXh zy>vb7N7_d1z&`J1+-5l~hF_6!?;A>l5-#C=gl!9i+7dN}6r_?53JF@iU6roZ{j?hR z?XuL7K#(RDwio9z&{0T`eho?qAx8B@2CNAt$;LA9KL~2xg4q@at2`1JVEzh7QGl;W zt-LHRLpIalQZba{S{S{<005X`T=X-i(Whin zbj5CTX?)qFikA}&>>pKl$1+uN7eSI4F5wN!^8#7EFoJ)+|Eq7Eon9DDE?MhiGZvAD>SCswtQool6g< z*&OG(nW4C#no&4WDhVC0hfR7UU?XVE*&R$!4-{q@Xs+pWTOyI#bm|?6@tD`THITS9 znbhmXW6KuY0m(M2a3)MG1eagl9kGRCAn7|4gw&Nn^tK=Pjz%vWewWN5FElJ`uQ*HM zV>a_fc?~!^gbPnNq2E5`9b79D6(hhFqUOzi=~j3+(mfM^Y;N+Xd<0mo9<&YCRGnV5W5-_iv8uR=6LS6H^C#q989EQy zw#F`}jeW0h6UngLGRHWNwSX8fIQY;9nxNbIY=yu7L8rtIS5NpxO>HA`({bN+ONm1d z;8$nkq)SQIbN)N*JICuqFS^k8!2e+x@&@l~|#6UXf|dvUWyxn`Ng{1lg>QD=$&Hsa4^ZSN=`FH6xO zNByvJ*UT$wiH>oOGC1N`Ywtn0`_)g?Y@Z|Ci`4j+crrFhrhw>CDsj#Vu4w=&;$i$1 zq3Ys}#NvPCuplH?UWmVK=~@5SmM;B2tj_+|1QP{;B-ACOtZZG)oMr4CzqzWpI-42W z{g-@4L&26%1O5Hp$QBI#tGs zO@l!SG%C?y7!)QZ9-XuXpSpA_nMw%-T_;&AIwp@j5wBALEidGaZHZzTAKBREct#q} zMMm1e0qOg*Ct){2$Ju}fYqcl)$p9CtK~k<98x@m`c5*F6Phu^@c5z^3U?#L&Z-($A zd-pbW$Q)4cFo9fSI}(|7hlpW?ETjSGeV9T{IcO|BNOC)>e3>8Fska)EKxDAx7~RJf zk^MvXzMRwfIXG;(*wS$YKy+ zVDyxYcDZ0Asd)8Nwv5z8!w%Ym05A^Q`Tkt?xQ-T3sRsLWzh-ztfRS~(um+Nvjxd<~Z8$RxnrQ)B0>P`lDWKwMh zSb9`Xqr@^aPsvbv+$-}0WnU+}nXGVRQTdRWdHNq<3EMH(@a#^#?x9K6RMCD}Tecaa zQM(_M1}d>$?}Wso$R`Hu_=|hVx`s{@_u5^WdZi?<`cxV?Jtl; zBtpW4v!R^AZC3HCcLzz8G8>cr52Xe>JD~*DN;L@@!+Ii!4YNL(N&%@je}#x}4(4_3 z3oRZl0&)H{h8*Q zg_|nTJj`*;DW+dV$13sJZ+?4L#xgT!XtHZgpXM76IYZUa&hdL1EDoufqO|SPFAc`G zN|jkD%w0S8yomPIN9Bw5#YaS&3k>ijXdN;p<3@1p8_;&C1xf?IT$JTxi~N`g*~kr- zszH&7^gWle&_kmwqHKi`~6i^pI zddN>#k1VJmZ%b_vni{veGy0s1JR2c;Q9^s+?agE*&0BIVa^0O_$_>B5907JmqEqgv zh@PXo*(8aO)xc=q$h=%^^s&Q8uYh>DyEeR0UhYH6e1#(gR~R2N2}np$z`91;+I$_c z`wfTwCfEi>Jvv!k=G$gK*Pz#V6V=+s%L8kfGeiw~^$yujx=%q|qGrsz<6mb6{6p&k zcqhbCP3%%F&a^>UY)dK~ebL+cZbZ#GcpDZN-u-P?fQTKW-vJIftiGVP$FPA*4#IBM zQz~E6lFQURQYOWOJld|6@ABOW1kD zhj$BZL8Ta<+=eHt-s?~udt#iJ7r*YyzNoW6@+#B)LK1YorCe(9;!?UTa2e8zr1uc_ znmBZtm)aN8o6~)^;y5s%4~o{J;yK(7v5aYF+_PN9Qe8*G&{%g;!RdQ=JoRuHVPXl? 
zP+{HOf;8JPD@_8b*BSJ(y+!y_lhvy*m?C3v`(d3zD%M4WHpubz(XdYzJ4PvpTX1L_ zH`Hn)UexhoOH=lnl)dMbJuf0oXao7sJrT%A%#(8R5- zt?2!oGA6iHbSe~l{r(~ZSom44Hdn2iPji~9LX%-dZ2hfY^<<43j&nhHogs!?*lNX` zZ2&#XsoX!W#AId1t+R=&!{e7xTuE(C-1I}8T*3K7Vcz97{vKb+`O!`=fctNfjknLq zij@85pZ+K(D11Z{rbbUNGt|2TL-S?K=)H&=r@rQ(C2X&B?KDRkxzMnGOj+&rGtk@lg9h|BSo z2oMsd^-{!T&Y@=!wjwkcxnfWUGN+0eP=4&6lyw#DOZ!h7}Gih+*V)yB%U>S{UM(+9Y4qXH9fTtYzU7l~v0%Q`T*k z+Lr&a#u;jeEM75vPIz~9v+7`62WNQ34?|R^%<>A+Q`r>F?I-yjUe8+})?Cit-==E1 zpzwo9`fQPd@Xz~|qqlGHe^ad`)KcV87;E%GA&|;FO7xaslE^+XMY|m0_2nWXIE;pF z>jRv~2d<)#0ek?&4a~k(lf#6OCudqrY4?1jG{wxJANci%*Z4SnFNpTrP%vuvT5M4W zFdQxUTin9xmvOP-_$swO$YCOsahOcydwldXa*gB36G`aUZVTbUV;WHuesZ>#C;Kcm zR(J&O`L%9mB!A@;%85)FK5WsryGpGlP*h;%gWb%@AJD}2oD13|@i2A>udBTeJ@NJ3 zf)Ju6*!N=r8J*Jp0AxM6d+4PE_nVu(c?5N?3(6HwX@X2+o zj5|hnS$KUkhTL6j&cJ&9cT0O^l@4em>rbk>EG@LGpgRF(hH31W zV&B43CB6S}dbP{c2_b+)SY0Y3i9Iinvk#qSk*90gwTlcpVDX_~+UijYW#Srt=hbH2 z8eD9eFyxX(GpW(~y#WF5dA`jGkdI|4Eb|7zTThObpWpCD&Z zA=MTx9HO(&j<%^CK-BfF8$qew{XoeOu3^voykg6m>Q7iO>2U7wB4;0}S)9uKU9VQi zu;Clx{HDPPdt{?a1S__Hc=20?Ma$jCOZc`}#7^EGkxxAxU)2U*a46p1=i{0T15%C1 zMvOH`mN^$@zu6E}ssTdz&9FkS<&oji4LD}drtpqZ;X?oNa73IaO-7eIh)6pp}jZogT0wN=`C9 ze%Y?jmml+q0mQ9%xrV?dfbvW6lB!3JwtHr73*c`t;`{*3>rCJsvc(rJsFbt+={@2a zqu|5gVQZS(faN{-@fg?hprO6CUI3gV{5kK+(E;oTSHs~QGiu}jhI4Q=`bKe0i>c&D zxf~aMy2TZ2ap>V6r@n_qb$`u+fdYz)Mxe)8msFBnk-|ED_I|4LZ?hLQh+a63s&_pgyJ`nzJ;x|tT$vimJ!UXSE&Xh1V~b7mO{2^U++fN}`PEV&=ziAN=xc}hGX!;r^Ak$&PrZpN zxv;Aq6lH& z5|8B*IhDG2pi?IYGTQ1dt+=h{8U*z$6gUX+^2(Jgbp5FwER23}$b{!s_$)^tdp6mi zQfwCC1_l`}GXtGl-(0%YQPc&J>?yy7HYxM8{ z<*@ir+$Po&R258A&fIuMm_ zsw#h%!GnJ!5&VP8>3@S~{Ad3DXOmZ@jR4kL`1T%SF6F@KX}~Gg1n1C;+a$vkNTYxyMrc7^mFdrv_d`b#s>n)O z8VVFdQ&`B1pd$l${(aMnU_zSG0sSuv-RU`%6ZCXcgV2o!S|C5s2wloc1MOa z`+kSeK=5Fox#O`3Z}V!W-JoM1Cq?VkpzL8((!lCcc8QVMKxa4Ur-9Bv(pX1*4`W7$ zbC6c`{AEpg3)hvUYE@Wqm{tOTf_M77b-oQyE@3%+d94f7Y>X|pfAhIkuS}uo(Ue;8saC9v(Q!6JbolPi7VHbP9r2&q zVF|L{B4IJ~S65#-ch#AHZ?FHK8cipXT`G4giv6V+`k$x-d z-8=%K^|g%;mALA3Pf-70J*p>&Il=4!=>M*mf%Vo&iF%~f=iVjJ|JoYYxNV@{w1uJH zgsIcCtq6&vH&MY4jmK4UEND%{LxDiKK-&5|t-_J+kRjN~lM_|)%qQgF-4+`|OF`_x zVY4@<^XoFD^Dy!NTUvh^loGeS#y?rq@Pbkf=U7TQe;*K~S4Xy}-|IVIl}CN{G`(;IJyjPJv0-a4j({ z7Ekb7vKe_x_HK~8-Xg4vOl!5|&dxh=<+3hzrZFZ+7bv#xc@k+L4g~Yh#*JcMBNtmy zt-6(-Atr4qYzQRK-43a>O8*`&baAp5KtJMgITGWxUTLCD2$P=V1uQ3x4&6W<>+kSF zsZN#%5i|47SXySMyZ(9HN4&vdV$>KfnovV3o_Dn99sX0P_p467?sqR9sHMR1u{P;B z31^(~6Wiezmf`x@eGZnjrjK1sV&d?_U@I*=wE2sewO^FWw+Oq+y-W>RsaeB%qvI8G zoc26rG%&if%M*pNNdlb{b5+Cm%b96WREv_Hvm*v`q3i{C+SF$pP*w9p*v%RfNroCL z&VLA3lDWH^Mw9UlSDKIv^;-|Pct|T9!HnYw<<0$SSFhZDUbOAWMv4}KW^Q=#A>*gQ z8Z@^1IFrskVPjsAYZ&bVJo4feZtQ3k3qU`I8V#cMUBvH`TxF(Zw_)l9b1E=4<{JS% zQk>TN&I&e*m1)YqlNeHx7*3LQfF|s)-NT@9b@|S*{oRl#%U=QE2g>Bx$_@piyEM-A zaC-}&8*7Rhnv%Wr(|8-%Y8!?&*+@%@sI&fX1BT!RNXCOrz{=erj>QJugaojMdl0pt4LD*sVm#AO{>z*Hl3T}4oXyRBzYX( z>~dw)1`O+-CH72`!Jh3NOLSE5JVUCot&_}IfWzaG8C#q3z$`-r-iIhPh}(#S_TA;o zP93MVYFp1751@k@SdCkMn9ld7`h8)7Ihqglyz&XB+S?Q5Jt6JOsIhfZpS1y>TJ;&> zFmI3YiZ7*#<2(ta=eQBqPu8e~G8BYV>n3ZJP5DUzK5C3w4f`vRq zn&O(*gap6!lBlYR+W9%iSUXPnlO$)=cW=hC(72PPUe&(1PWqwa{&P;m@}{+`jx=oW z?j#w+J^m9;mob$B6vTX@54naMeqHF=?kr&@uH`t<-ZlQnfw~t;<#J<)1R1wZIdM8=Y@^MNy&qm zS&JDg8Ib~Kz8j^(c%=BU^L?Gk7wZf_sLy^xMG2Bq&;>=$j%=4r_1Q5IYp7Y*hx^n# zF(dw6`rGj@ZBh~rZ}#9gSZx8g+_)}*%U3W6B2RG;1aa@jq(&{f@^7Z{wDj2? 
zxRT|5mhlb*dp`y;`%h`@C*SQe6@3g6u-bf_R@C;>eEnrIIx3h5Bp+24Mdwk_9?kdnq^^?c^x*sxZ)1vY3 z6yf9{N$KC;lUND;lZ#3{gS#@YZ6r3Y4@&NqpLtCL&TXh0rV(g^8=SEPtYtQr6P2DR zsp}cAHWB7pfrdvlxxCS0PK#_oM`=)7%B9I_$$mxJ5(&-qj}YP=m=KU(n8o~4xxV_y zVb%b}G&mMVEa$J|fCYf>=fM;jSBMGP?XSOP{?e5%rFCus7!nVDJ$4L1bP|e>f9g^SHL{CG2NDL zP0)`j#IrFmeKIXP|vO!sVl68evYrz~KA|>{I`)h8kIpa4AQ& z8wRjD21|C}IJ=G8)}*EVVAe*OI}N}2EbfW0H?(gRB)P&Gdjq;!(V-~KaH^%EoZ=jm zm35t~9+e9xBDy@nwC9I4b+if=xLL%Nz;tuL!Sr6IXe2Lwfr202)T$AVDdW&r?=R<( zNhymkJT76HB9x6xMnb`#_jkbhp4d5Q+~9_v-^+8#S;H8k47mVWS{w1$vkl=MNs6ITnyca zPAC_5@8Pi&D({l5fQ$%!%Y6d)wd`cI(^ti7#`$BtfU@3Eu3a88sC$2x9_$5lKO=$c zQFEwz2Wq{d*&=QCquug0h2hV+c9=scAlZ3S(ep0-l@bJ9KCiuTohGB@0Ra z@~Es@u3_X$WSM#6jh3>ouWHhZTZ|*eUZi2^NhgJHLy~=BiaDHBWe*cFa zFeBt5wwC_)>qz6>{FZ6)I<^T)Z|JQ8!uMftucE&FMD5rF_aQYH?3OYJlm;#KP zC2dXZ{-p+2TDP4S_{n?Tq`8;YBs)>OAq_)Pvj7$}Ncu|!m9QAP$&z}D{c(76yuR`e z7ZQFjC=zHm1mCY%p(tT3301rZn&}IiX-?*xY<&J+9|(K=N*rn&H;yaEjeT=_7HW#Z ztAO1u7zigisWcbHU@d?t56xaeXlC0901BOFV>lqxWYX)#Av=G;+?I)kB_sO>XB<(A zB#CSFtmJ}W8Ge8JV!bBe|dNiPf~vmlIM*cXt`oRZit7B1I1R99Jgg2b2iy! z9V^v+%&x<}N{ON-7DCd>%jJNiZ>2#>?zzZ%>cZS`&%zkTd==^@ql`^azG2$wiAeX< zo36>DF+178?B{}!z1#c-=vgxzO9`>bI$?R@_2{Hh%aw(wM0VU|7xuY;77^M#pbW{K zWT=mFZQciH_BrWR>6HgEp^%wHJFvETB<1GtxNUSJ9|ynBGbbt5t(mM@Z(DUW*4L7# zH(vK1d*FabOm<~-_>hs9KswLvE}%Vel3ub*sTRM~DRL~TL*|uClxrW_b?!IB>Gucb znLGlcCcavc?j{KBFC~?GNV{S6NY`OJox&gfKm-{o(%@c>^c4f>yq?rjh^ANbuiiP5&2c{S!Dtno#avr2`+=#->}72j4Xz z?^c*V;|a$YexaRHs7d4vCm3kr{yaB6BK2TM|7snO46~{%XwhnJX#uNl){m?$lH~k65#O+I+}jmu?{>Y0-bX_4(|aSiR;v-Xe_kaengoh6bGrv+cU;iQcmh zgrnOy0lMk7jRl+Oy8jM7)^+a>X4f^$kXDg!*F+LRVv|IX0MbE)l4K>sk$l+;w2_cW z7!{EcNfb%K4JDH#3RI91NfpV$DHAwkkd#UD+Zd6nZ6`x^1rclhljjr;mW9^90uLQY;DF7`}GBC%%1U^W}(UcU|4R99?E^ujedKC zfo@nY3_OEpsDVT#-n4BE+{T{2b((mGL74o7=TErx`l~UFZ=V~*zlVnD2YfRA zi`hE%3={hip>6Uv+T+t75A$;{o9Scd2JvzHq%{6p4?PTmZHg%2Rcde)ou&*q^B^o! z^v-w9$-%;gv|w>##Tatq-65lfv}jw+NkGfMZ4=E=0-FOUOj@WHXpnZ_uIpRu6SRzd zXW^JV&4~^42&`(}S$2Vl+%(Wlol3V+dT_-e&S z)3l|s%Zz-|Jr_m8JNzI?^>w5U%raT$L8EYc=W0y0MDSU9-0U7b)`8hN%-^p`hx$e~ z$e#&;ci7e^SCh5W6A6l?5SEK< ztaz&X@xP-?V@OO!z+u=mh1g<`ZUuR1?-viJ=(wvBA6|L{GpSp|7uXVUr!qYjd*TDc zkm%&uST&DQ4~e%~WMssiHHM7G#_q;>&I8q!zUoh7?2Tl2U4t?MTkhJ+pTUzAWr@&; z2ddMocX+0}AQ7!Hcmo$Z#7eDRJS}}+6)Wid^o~>s><^xY5|NjqlC21+Ayl6=&Pq}& zuL*K+8h|x#{F>|qWYzc68DrZ;c#30vR-f$Pwy$Zo+6miTvu~iKLjj7Iq0Kvr5rnK~P!!voAq>wo#%jH;1y&)2?PjI5G{)W})T`9? 
z%0%L_QvvO^Snu*AE4CoVjP-1G&xi}p8nJMg;>p2tVErpGiMIg)|7~-THJpN%IGZ|k zv06)!n!zF&O&vR-I=Z4K=9C~n>o(}N6u_=Z>s4|tky4Ni^uGW`+d~m4(BBZ&Q?iM9mimmCvcjl$#x{H z9^HgqG$>ukDygf!Z~7r8+6xh8ocG#rZ{0LXlYlTBJ(J@Y3>uNF(JEgjZnMhe&C zyu_}B(qvm zOFGF?HRk#L`kpbFNwBFkQ;LU*qf)_CB0k@Ac|Kc$} z%0wJI=UffrM(BeC*8Kjph^59rgzYPXSR|c#$Ki|Xe0z6J2+H-Ujb{LtYb85kxIJ^Y zC$|>MxvjyFw}%4VnWa@h_Mof;{@QpQn)DgV6IQ6T;Lz|W+CCdNE0U2has4B+i-JYj zfiK6=)QUxvY1!t??*lqRvZ{(gGyqw{(52LHt4mR5myA9xsYB zPr#)QMe)$tILetaWRg2%g_1@ba1Sp_M^n{CewSIK9RY5D0tO029Gco`A%Yl%#uqf< zM92ewTv*Bhnw)8YsmkEFsT-SB5aMkG%NLDt564~WYmYxV`hXH#2PLK~rg@_tikwP;}@;D^WJcEHlky7OVkA(ucx?I`; zbaOF+JNPg{MRhZB9{Q&KtaX#VFHVqolZLgd(;UiYlJ%6 zuyFUptkGNZlpl6v>e0=@eK@pvf}I9oIOMxx?uXvm1iXV_Z167XlJ|+U!RT6O`$$_7 z9nx)x=~``o$tsEvHwq>211kOSuXN*wHi7)eM~;4M7M!UqD8Hbsa#0IwKHd%8=ujHV zDE!wIrECD;SA)B4F!%8VM>B!nICUpKeKHl*?V6DcY~?m9Dxx~ZR5e|?^VTaXKl=pu z(+pRiDazfWRCwfcPob3Vi%4JnqfWq)bOg4E9ShDYnF}T=oN^utX2lw|G3nkg6W(dY zeBn{ubL4KpBIdN4EXU9yB^yG^a`yR@7X#&;_bDu~m?;EfVb&m|w(>ccsP@f+ zT&BOw!E6>(ewNpKOuP>ys~k+kF`VGK5s1V#NjSMy z>#!F!X?8q*;W~AXv}$A`Y5pyL4;8~5^?09EN9?}x2E1lCb0K`IVBaTm!xI8Q*_7Ac z?UrS$@cA8=e>7`K=Jk#j;4<<(S>QN09Krgnn({uzXcTmtIR_?c0c>QMqx{d%_`LEJ zB9w|z3zhJB!e@C@%=yA3f7hN%F;jz=9ax^t5Cvg`l#76Y>1Giqmxnq>B)%jizcfNB z#!yd@;xe&jjbFbY`v&}8M$L5++C`aGV`QKD){D0QLfA5>7RJ{(ix@0v}6fD@xdgCOPT#yMn})=~-xfcvz=M=tH0;f-rTM zD7ELp(30se3fGtRTsl4sgEo39RD2ubB4|G8@WX{8(aE4dhd7oYQLT7ze7Z{6hfw#P z6;V}rq(Xz#`T6*1KC!IRFWV9jAsG~o(=wbnmEMTPTo}nj7E8N32qTx{B9{8GK!--9 zT=L-jnY&QTRjSZfv3j>b$(y2SXiJ*VH&5xWTv$lCE_{5VKI(nZ3%tI5*Q)KXNY4#t zBRVUJNtn=^~vK1O9$a|H74LkUy)=2*|@XuKH>BdHZGpyyB#IE_(L3LIsZ(J1lh)uq; zZ8*s&6(*dP>Z=v((J#Rl7Bmd&ex7f;-xqjnfe9mU8jo$@)Oc=$H*E-!#8qB8_=}-4 zH*Ih*gEEA(Uhr!FOkW2xhXL>W+7*MpY7hSRzQq3qAYvBQCdwX8&L%ehD?qZMT$8GS95Mt3nyy8l_E2(C=NyuZ^@j%GMPzcP5*-QT5*YxrWjXYYZ@McT z0)#V49lKYuLNx0|!8HOFZ%y)k0=XXvEypox6OQ@sj#t@E(|22qI=J2Uwaa=C_TTI# zIOqowZtJ3^N1rRAYR8^CqR@@ID2ybB5x#sm!}uhBHGuLYe^r3fB!6{)^dve7_L@Lc zs9LEuDu5<1BWRgKC-Fuas`pHQ{R-_M2VN)EL2xf%gB%i_1ieo!3o33@OWSZYsncDjVN{L5O0$fJBhVT0Uyl(4#&wR4%{gxx}$G zCC8DbKML!v9CyX##m9DQE9MLlh3I#IK<=Q8iVK~`>&^wpb6{~|XJ+%VdoAtVcgPZ$ zPQwN9syW;L0+_jXktUG&!j0SPf?usY9jQ1ObucG%$>u5wyno>Q2lry^X?eCqzQ;+-Co zW!d-zm>fNBSEo47W39u44hrR6>3(s6-p)_1}!)Muq>=C3@oxw2_|4LF;0SZsjJh3adMaldE6l(`r>R7Ks^xS4~urvDmn4ipKk6;}H*J z`N*>QMP*8jm98Ia~t>h(LJ{ATsZv*<=Cc3Wz0#` zg)Ms4A3>ti>cML9a112IAOuH=&Vz!n34I&R342+t-1!R*{huyH>N!kE#Y#2e_r?RM z6{yUXkta)r)Rqfm2!HA-G^w^NM={}9w7l#r6tUit0P7(jCT;n@u}i_HEM9^NIsZNj zWn{)2x0bg5qMWYAr|^8y{^PY+KhmS!cwBZ+uEzd>qJXya39C?CWrZU3L{{q*nW%Lu z8C)S_BEI?uy?f!(!ZUa-WXK3v%yzt!dkn7kjhVD!4#xhy+px_=x%-Y9Y5lpb3!=Cq zv_r`*QHfJ+qoo?OtN1f0)g3I*B0zmDf$QqQt z6dbZa+rs&VEvagNCG?9n6=mxu4M{9yiv=o}2EVv^crE?#!N9wZ%C`2PQBjE=QnpS% z(?LJfw=F|*;ScII0Op&}2htY*99hb4{G-A;RtzK75>r0gHr8;<%u2J3NJl_7VR?|Y znb-jmeAlmMDPjnWv*M;Q7y1JZ z_`ieJsB6@OFR%A1blZUUz>jDIc8O1F&8Wx+mWfaM zytP{o;Tvv0Z6(Ju)uW!RWvpO9tk>o1$v@+L`pA9AA-~cczWtw+v1MjX@3>b4kcKQ%qu>^R-Sl=@ojQv)05 zM2#8KSrKqiWX;J8l$=G=EfIJynp)sHfjV8r@Krl9k9RTc@rpmH0#smgu0`}h3 znHW*H}1&i6(zF zV$f^x$mA9O1icOzE8^Hk~>lI&MPRzbTj(j*)*MPTWG7;)r(P;s9U zs<)=(J6X8fcoT0%WtR|xD3?JIi(pm=IDgXTL+UxVz-7kWO^!{rFwpwZ`#LWrKj#R7 zOmTKtsH|wW_wBpQg(!ng$vo8%<{en+n&?)!3xLB%1|MUp!n_&^7%t`(boeEy-E=UdlK{vH?n8(A{fmSNdzb}8({2Yx$wpbsO9b~ZX}>$@N4 zXW&YXG_tL25wsT*GpSZDaWjh>rscXMiNw-xA5I0PRaDIa=y<(WoRHw0g*^v8tT5+T z1Qb-Zl1|39cM|!~z5J#Zs#v0cpcp&~Mx?bFnp>)4Dq<3C| z7FYb2#QiCUe~(LvsCb0PgBVH2{*Uxb4F%I$7S<(5-%L|)dKa`0HHY}9$K4V|OBl8@ ze;yu=;gznYkH2-cTpcmZ+En_&wL&je_PH!t6eD-YJzDh+ui}XPNE#<_3_1sFGkQwc zy;qx+Hryv*gwH71M>Nx?aI#qG_nG`ZFw+jV7d!p64IKW*l#>4@e(eAE|MbsYaZwBA 
zi#uxmSdKf$O2+{afsBF-DFu*?{nR`AMe&m+o`hf*;7&oFo@i{q#Qd9>q-RabN?X5d zQL75U)~2|64My$$XJxlmVL><5S$9iowR&~;%FN zHY(3LrS7Ht~8j3_#iGWRW~hklWuY( znvMn~6MYX`=<26qY@Ck}9^zeDoSCjhP^JzCZY#_SI^pLT5c8QF{r8~HOIo-bVeGLH zS`Sr`_UOHk-ncka5A}h+xBE@D_j4UTzSGyhi+^bJy@&3AFq&*nmk=NNg`O~%A%7OD z-6BGa^tn;Y=|-H;4kd64Me{#+@Bs**R$^H;AhxX7|8 zOb07lt{p4ezm=5>xp!$awv|Ooh7*gHaPEYrt8iB7Z>X4AuhF~E} z#h@LlRyYC+m%12#;5Ot-wwS8Be&Kykt5yn+{wUX9slv#c4zYE~d5Kc*Xe@h`LNO&4 z=s0EKA9m&oL4M08UmMtLv2%o2>b@P8E}?vGqJX!acc;=QdXZN$hfg!dGwEN+Li&jZ z{eE#FzNHhazolZ!dk9#%;`=NedaxaWLJgT;#%RS+S*FFQ=eW3QXR$vbz_8%=A<)Is zC{ZFP51eafvVF1WEz=;Ys35!<0>7Pwp-BhKNi5#+F0jeS!3-Zy6+3H2@BS8eeIpXnXDwoXIz%o6Ckl1!6Q;l`I(##uW8Z2oOSI*7a4YT1*@O@Y)WZ)dgb-q)8mDEP|_8%B~Hzu&_=Uc38p1;SQbv!p<%-3i*ZcN=K*Ki-M9*$Qi{s0VQWuf z9xzSvo>%3xXN|k>#9J6JVw?v);{2V2_Yl~I*S8AZs|ATt;HG)@ttn9(Mb^;KrtG9m zWM~H@hRWPxJnsTkynI4w4Qw)?uZL&sA)T(ph^)Zx#2YJ$t0R*=%0C$5;1)u|M!t;+ zn3C1GxV_VBvR~Z1Z!;^vR+j5jj_i?TosO{7`H}M6vQmY%4&UY#?t+$JI_%0txyJ*N z0{7|PO638oh=GIQm&c`{wDri&uNYSN(`G=g&umfI!B0Q*W0yc3zQ=6G(N57%ujOem zuG3F<-|1CHAfh%K&$0E(MQeNTNSaZZRr`r7-Kuxgxfy(HgM#E(3eEBs4G*xHG;oh$ zE^4ZbJ2_r=S3v}!DxB;9&S`WGH+m4s2Y5b7r!D-&C2L<%R#xh^wAe0Y{F%H(#JL+z zZzaKiCrfFFr6&3wxTB@n=<4$>x~S?x!`fr%QAdapRV1WFmxDB{a{s1m)6m=>R-MYO z?&~(G#x)H8Tc6d&zw-CSh*MyJy!UPit@kP4?M3-2FvF7rW0_QF-CMg#;a>QxI}6{1 zdu4rmZX=fNB%%x1;s( zO0JeM^>4gCcd};-9k~e)*g}XL;OBydCE6%gvZ643$a%~Mp-LHYi4)7x)loUvk)(%; ze$N<1(^lazU?oi9y#2~X1Er0#jxM>&6?ad6E-?rQ9>Ln>psyopPFMjn-5=Gjq?H(O z#Q8Sc8QupgFD?;gnlq6>5u5adaLD&Az0Q5*4Y3X~=aLw?0C=(N!b#ZfGHJvE+o!~c znsu~Xo9vOZjNrzC+-vRi^VSktw|^}cf7?uu-@b=LW`4D}*cZ4BYXX~VaVbi5W-J^I z8~J0ZN2<^D_-9hfi6Fw!JDs~Ph=oF1xriwKOmkd~zMFYYdmE9}@{gLfV<8@lmYFT= z6I-*0keGg0G8(Vm#Hw`KT7rbgJ0J2-s`05w>eN6JzN!7jT2SoC#Hm{2ezg{w-@Jt! zh?Gm%*sIFt`>GDxp%r9XJ9B#mPHBo{1aJW{@E`+@Zk4 zZTYlRPDvG@%SJLxz-@*{bo>%UEfDreJPEf^ii5S{{uX786u(F+|oJ6z(`-$k;6$ zmpJX4(Mf~^?GPW&)Yr%Ttc({*f$RoFX4{&X8#zfZvd2fwu6(>PSm6tP92{lexYKXG z3RJ1N^Hi?xdCym7Al`Q>Hy6X2noq-Lb5#xAhvKD?)a&bRj$c}EIC4a0LtaGPO-zf} zpR{JB^+8fk*Kc3Rj7L=qx!di5%Sew~y3c*qCIMVY@W&&WB3u{WtRRj$RugBjO}c3b zol0D#>Rg8S84*9nsINolutTB}FQjQYfNfQ7nM?|Pkn z<30ajSb-)x#^xy~>vC2kbM_hEX1faZF6ZtK1AG#MdQ1L<>#ClM1Igrb6it%J_wCK^eXyOC-20IH2OVf?6C z8}R(Afja~Ox ziLxl`6Z*z!KRT`R_aF|xXYx^uifw(DE!z-jEeqAu`dPj|}jm#E{t35P5hO#8G2H zfe+=Nao8zGj^(;vIVZHv83FJ9aYs7H*1DY3FJ|#{utcaDdE#k@Dz>$0vi23#Pp_MQ zx!QHNz_Ygz9S(xAy2subop0Z?umTz;y=_sh>P*CiozCUhHNV_Gi?XOB$h{0X**7)w z*rD?|cP{+ZcEN#PJ_lP?jJ)Mi7=H21$us`=_v$sJkrVc~6s;y(=vPuvJIvGkM~^|$ z*NO3nz`Ebv`j9{;Y^)8kcqPV8c;sp37sO1E+0Vhol`;Qx`MUj2_&b?P2%fH_+2CrN z;7X`wwXB5(XX_q{dQH2Lk|C_+P{`TU0Vtsy4!fGA@aXIrQc%sm+gE?w-z1YG?tN;! 
zb_}r&t=lk+ehMS*U3exq<-tYP!7S(FvgiM3-C>gZqAivk*KgT6r!p3(d^cir)~CDEER(BawZntc!lKVi%J#S1PRwnVhgXre2(A=92ch zIHz~ugz|e=!N#%j?FmqTX(|fLnokX(aY-a~nJV#V>sU%k?_)B_=2*`dPgt{%1kogoAj1IrYDAP}W1MWLygXsH< zc!SSBS4z52f-rTc6eNq%+rk3A_i(!T1%_e$z)9MwEQW3E?^G5vF!d4g%jN*aGGUEA zW7)VV_ubOegWAV8d_H|^YNsw0aDd0hyS3~8%L(wGytp|ujU>4~H=I{v{-OxCzyv+}5obVc*D zgs)FcuK(`(#?Q`N8@!(F9xeZ>%Cf|Fs*P!ZyOH8f=9ceshbNQx;EN?EzbgsSi2$fy zrTqDPDdbMD@`2NS5t(SjhF0s8w6#Og6OMVYZ1K#BUjyB;rFBXfwZ~6zdMT=db{ag9 zu~nDy;0d$I+ZGe609P7Tnv}X7N<@!eV1;{A1pXFfcq^H{7b)tkBITVx%hz{N90dtt zAN&lr@l4x!%Pp7XhDrC?SA51xGmu*keeJaP45J>6PzUEH`ps`&_5%aT8%UQu0TO1Yqc-;%0E->gwHi5G@njvtM1X;?l&_!^I&LoWIbms_GD^y zv}~c5={Unqc?6?#E5#9N4wf;7svmY5K?cw#=8wasjqCMksa+~J+xoKSqwnA4YS%4{ zS}M_pY*XPjM6bF6HB{(P+hK5dpSyl!KH~E3xTRC0kBvulRYKo`1?s;4x^M6iuRR1A zK0JpR)Tk?iQ0H{tRDP5ebUt>@uii!$e+PeHur4w<=J_o-!>Vwql_k4enBaB_vIduJ zD-vL9h;KU#=S*!|Si7s&0mti1jz`*4J9FuteFe;PX6GyN3Omu;0%Ye8R9HH3JSQN_ zFw5*S9Nb*?LeMkj!mm41V#az-5>Wkn|`_Mm2jO?uJm^K zO`$g_tG*%zf8}gWpLP%X@OLiVv1$+N9)M|CSstb z7ehZV6vPhT@3A~F)$X_O{$uA7H>6e7=Y%8OBY>t5dIVvxf4gx9iAa0KN( zf7Ga(xlKsh)5*FHXA{8-`NyJ8Rm!ww4_{Pway1HTVXoH3qydCkoi3}Az_3n3TO@Nk zCz_iD`S0@)ToZ@sv;C5I52|g09ST#Y59nNZyXoY&yNIaQ&-mvw$pZ5 z>G>vsj?S?Z?G{=a&sl>x0jvnhV`H}j+s^dWtI4B0icm;LcmH39KT%LnX9G;;74mV` zi1fk=Aj>F-+^O{_pnKT8TpMK*fuVjqz31IeUcGMbzKw4OnFxHa2K5>gMxjxeCL2Wi zYM9$e_432MnDV2~DN*Dm-rB=%#($NEdyN@|(Hf0+lZQZ{YNelBgFYqPg@Jygf8~Lh zNLvX((n!f)0%JmHAT5+A5}ssPLK;apm849{Ac~}lbb;8Hw3u-!y6~sTb6Y_9i4g-P zonc$Vu2u*(ii~*FE*Ti}OrZf860ACdw?;pW2!?YAEvz;sUwL07HtND2Q^+~$q<^&m z9v$F0Ea0UrZucGf>=p`4ydjAg(QIXSXHvH`&Q~(TH$*Vx5*FX!OFC>BVuiwzr|ypc zK2XY&7D*U_;Asy^cbf-~|Ht4v8#63}&g2av0vgb8<*I+SEo)a8j|P}b)e}k2v@w12 zZ9or|A4p7<)2~eB8|8)JTJa3kQ?#dfp>(TE<{Pt`GqO*mx3e{UV}ihR2dUv2uyb&D$=GZ zoo>pgj);mQh$YI>LZ{0L;B!@nyuC$H|Fr1~w1HJ@Tm|8=SC`q4g$TKT(6z3!mAiDF zUkoZNoD}2V2$RnrC)s?wKF4ck+n%dj2l+YulrDR7~h}qc(W)swqf+dC6LQG$n zWAZLaDf4A8A$JSX)&#Sk_nUtzFj$j4}^?WF8+l zs^+ER?Wi)QpA(69PL%G!l&NKzORT^p`ObbOwG%)^MKYA$MkE~UJH(ifA%EF;W;79v z`@u+$5Tcw*xIXe!Naf<>5w};Yvon#8el2CM?nitTn>ZFtLp>U>BtX5vck_?;&mv(a z@+C~f7&rlA9?eM$gPQni00rxg4=cz=SfTO=q(VE*ZU*b&T9N;q7cotBiG$We>{TE8Knww{FC#`bg)`NxmUkYP#+?d)0hUWGa>;w2om5_g+c zez6%>uT_P%)?gMfOmRiS=MC+5AR;Ps&X3zH7)m0kppqG_dX{dX%S*e8K3o~o&x$^z z*`8kxXY`lR5}4@@Qjhhxx0uHn`)Dgga%Kz81#v0KFNCp-7#eF_RpT)=^_X*|-Uy64I^tmF)ZKK3v>c`jYTYT64%Jyt8Au{UFIo94WQy=cIPXd3 z&Sc|hPwHf!A0rGtc+eLe9AHkckN<}^l#Wgn?+N~kwAA!Q5S!eT$oQQevsri7IU zhbD8}OayU^!M_@KA(Ue`Gog`SD?rYsf&!F9u4fY%J1H;kN*La{B+W|MNgdD1gIkeA zWnEY1dQ2IrxEI4)@KM`98t>kstJ%+CW|Uma8l znwd(lyB|#(_{|KKc8pa?O&{5me?AP2jV1eN6VHN9dBXY-TwewkOKC`dDv)_Doz`<$ zIf+m zru29MWA~Sy)H4nDtYwTU$)TrNsd26qz7`tC>m*ds0SZlnXLcEdw;%po4aGcbCKdyv zS@a=hg>AE;j8CcG8AlhxVVxq?KOOteIVwD4+?ZjgHilo(Kj5T*~^->1BR zrDdvw&*~Du>)BE9`v9#W_9$haz3jH?*sq2o;<_LjJ1nU}5bwq*X=DMbwppk9 zAK+@^)Ui7oOwPd@#2or@eenXaF3{x3oBr#TWUMheJ|L7??#vZdWXEFMb&HzZ(gy5U zonQ1w1`dGa0OWhE8R&7+{G4SicQ9+w)2SX2CtI*`*VP@Owc{*eC#X%u1#xmR%9aze zDTn7$T~4wEukiS0n!4kXthf%Q(qmr!#Q7uDz{y5>>!@*VD-hrOp(A9rV;+F6`G~b< zNqI|GPS~B&;3z%!_>&tZeZ`cyBh3YaycCl+ILcz;t|@Y8)PhmZ1B3i4l(Q?2GUY5d z+``bGW7R^qVBKN>xU;0$3(=VEV(XEutyjv1tdW`@Qx^#|Lk9cc%#XN z2nH!UfaY9?fCwuv@@FM#9dSsPB@?K|BSS4u9Aj1wM^Eg(4A_Sw5oHA*lF^|+Gw3u2 ziV_7y28T$CVx%Uzs)yOMB{a23ZA4a5!hF5Y=~eClLL0J|&FKQRMHuVRhn@A~+PfM? 
zfw2Rj7y9u+;_4D>2!8R$DlNF!@qulO=%W`LWB4}-jO!gjcfK@Kdx=u_9*5prZxosS z=c8K71CiKe&l`~ZwnSE!ec&5HuARv=uGlp|+_IfsM5fXV`P1`)r73^mV{(l4GW zbu`W5j=G%4L6(cVlu$d+#aQ%R?#TGh`pHZZvMd35;R1|OhkYWD(X76IPUbuLy1DOd zHz$~BB6OdyhQxYqP&WrytO4zd5iuTl#4)aMaZ93@KO#$Vy9@4Q8EeI#r8D$z_$$;qh{Sqp^Ne=l zWSI=LA~xm339Ay6bVV9sZ$kBl)VN__dEJ1y!=%b^$wqKjafI!6!PZ|Il9CM(b7t^( zQ;=HXRyq~uZ7RLXy$E`_%Hmeu9~l*h<|ZnAG5kAn>HSw|R;MzhbLmTfuaNBfrr9%u zZ0@tu&;OaBq52M{wxGX#8^!rgGc?}+9c=Jd}AI$2p(9-Hx}PYER@ zVX=1-$a|b;1yVUsWiJJfs*H+|w24RwzB_$tkkxT1{xUkLcS+b!zz@Z62~NSlNr3wD zK59G4EN8@z{;NC9{+F9eSIxdGt{oV>_GQhzRz>)@Z#>I<`eYV)I_Z5B8C}}R-;1^; zFDms6Z}FWSz{()n+JT@JDy7J(>`ZKq;xvb`pMylrFv^PT-Gk@Q_gpjxnK6icN6zO^ zUet8Or9b(L-3rP>q6b=cyJXARkHV>ailn#kv_2j(31?(aN0brfRXnasN@(qca$W>C z?W!KEM(i{+>%yzjm5BhC$mXSKrin*PFf+}1zM)ZWK<@!G%WKUIrVn#UT2i~WvGeZf z(KsBK|Iz4*#D~bScSB!I$#b$;Mcr);2FSHtYelJ;K_KPRK*mMqtgu1!lt=dl4maEC zY;Lf`{-;Le1rCMXf`_J8`r{P=WYzZB?fc&VR@CRV9X+yfh`T%S0IO(sAo|R^j;x3N zl8Hb0&%d;{#8wNSdWK*I8-w#f+GV60V#K@fDacoQnm`vG`L<9`pt?EPMjw?KihKVj zX_fB(cz-8MCbw$8SoV?ssLWveFCd|+B=c`n{1Xkq@zT;i7*T@dxtf(r5v+WKP@R|6 z82D09*#j!`BU_;9=COuHolQq4QC@4EjRgDgcVdz3#I!r&O&uJaMpM1oUS54aLG44{ zi5{)3S65o<_X$D^eoRQu&` zg$8?Opdshxj%g(cN$D_+(kHpcB@tv{vz*mb{ZE9+NVNK zRkFf#2UBLHe{wyel0h1EOCvd0v{^Um#F$?iLxM^JTwN)U`6UL_CENk7v6@G~A4%NF z(8hE-*gd!tPjPFUDZI_x%^8)fE^1#vW1P69dglzjzwWs-)=CYGUkCF3_59}{Xzu^w zK%5-SEa}Ac9i;Ug0S=Cej`m+~6Sx1dRJmTsetvkbghq)kRN~O>8J!K++dXY=5N?pp zny+}M7GK)|d9cj={M|c|JVkc2-m8Cj=oJtq8bNfZyLTlYD)6i!;MqFn$_$rRfhZ-O7T|}%c#$YR)%V_?9zKGh{HcUG zwHVajhFFrBs^RYgW9_43J%Rp`Sv=BB)!3Ir4E|AOJpZlCWQ6}mk&XU(JJ}lo{uWx1 zf{YZ9JTlMpY+bQLgMTQZM!9-TZcGq@@T{b!X(Qev0}saWXa?MkE()?hgf~AQWjhlf zf*N9d!=Y!YD`nix%ftJ7ZO9_Gq#N~gKyPELz*KM$2Nso@5qr5(hGfSE21l1>FM#`q zrwDYJ@TU&7>l^SJ&0-AqENUH0;T;x?Nz!#^99r8PTqvi4A50Q%$%XiD;Q|g@gAkAC z>$@*Ro@>@}HkesHlL>pb`ep98ZWuX?`r~ps^l+Kehzipcl-IRW_e74=OA3ps2nj3u zCu@6_fJKP{sWfR_7m??CnBaX~zW65#+@QJrc@Oj-p97gk81AKYa%Us4T8L78_;f0Wb~$I8r4 z2L0V)iE1uVT!Zxi-X#nL{*e;~__VUm{;*m@9iGwj5=JRhvE@eK;Mp5t@|;QvNXMm`ojowe$H9087ab&GEE&f{^=8jE z9g`zac%xIEa%w;JxdgoIVW=9pGOJ?iavunh@ML2HUqV3|U^5&j+0N27o3_pCsqu&N zuBo}hQgk&UqlMP&{n$m#`VlIpNWFSYzT#mrvKqy7&;vjClZblW;TtOi)rjI6A!{8B z);UrDlt{7FWzC3BxAgdsQB}V-mHW#j z$O!+`w|4;8|Lq#0q8On1_~Au92TT{nhPX`kSJYO)DnJG^^`fM-)a&)xAP>w@aeoPg ziOLJWXBxQr=xz0A$(ej}fB|{{0tqxm#K@>47j}=WJPU@_22i?K0`xN}qpV!1vdSze zQ)cBC{F_FYWF;%^EII!Cxr(eVIxJU8-?#m$1Ghxbd)r}K2RhA7Y4GT$a#vFok?22^ zX zp0yzg5?C}Mw6~OsAPi}&6(x?mt@{ew9LgCdL+)h|k2z?><2Cio1UoCvx+pTgSl5^C zHs=*GD)oL7-Q{2-w&UyKd?J7bLI@Ph+;;`s{3|P7C||1m0;m0U@%8-Yi2Dx;;ta6= z3)qBCz`)@v$QnAT{QX%*-_g+Y-wM*Cq^UU1kNnY0BSROLu~QYEtK=T=O+n|3K7f5f zNhAA#V+q(?VS?rPO^)Xb#x>l>H=o4)(PsWAf!^8CgNv)t-&qaE@%R6lg-))^ z{*~^Z?@t1n?hgQ^f>KEcHJn3JRSeZ*4k8~?bi|e-3Wh{sYON-WhztjB%_EF4Lm}Bv ztP6gFQEc6nI&}B3U9PDU*5IDLA zpRLWL)mV)E%J=tylKg0o9{opI3w`JH;d772>2W=kwA8x&8qVe-AW%7Y&~`gd`QaYV z$7rp1MQ_BCm3#Lr_={fP8X$&d)T@2R!!XdCNos;#k;Yo%vJQL2yNrcuM%s^AYnvu3 zdC8@{yt7_xLtBjnb`1fRR5~QTiSSqrU?2~MNr0D(h-lv`)(&>|*l?)CwE<%qh%K%m z(?K5!u#zNLof}J>lshq*?4w6Q6__-;bOzzTk2v>P4t|&h3B{>cXT1DEFn*B?*5CzS zJy#Wnu0enNh36{ZxrQaH+{d*JRiZn;f<0LX!9(=(K$0o6SYrTBsF9Q(vR@{?!I4rN zB$m>%u;m#yti{nFm54(U_(y8VZn8+c@fwJID(21A%$O+-MW@i-Bn1g=l3RSl2uyB^ z=_9O2ehqLdO1%1umkMXNWTpmrGVeZAKX|EtXxaggHJo~Mkj56idiM$dPf-h03)+t~ zV8$vQj$|Mt8LUh(BH;pQ&igLD_kf4wh^bJX=(3Z#Zxin?YX|3sa`yUig8;C9((->; zuKTx^{|krZe^v41aTg>2`cRJ9oNPLbS=dH6R34?3gMKcA>Db_$KS%~LMeZQ9H0ivf zDI<~zX_7lv?Tx$R?&HWUoL>KsL!t(UnB9I~5SPo`p3wdL{krNPYpqR2~H3Iy(Qy6EtJ~ z)JFP_pw)^Ubr{I|NPp~TD5iY;7$K-3JGEXjY~mzc08 z8*rQpSM<3^lq<|%8}rf|9LlY^g3mCshs&zdv$8U7ynetvz-3mf-y2L`m;=pbo!QKG 
zhkq54&Bx#&yl8@Jq6Mfs>w28twmy;>lr9XaCVuCqS9p{}sn4@mQE6f$zBB#iIo}>a zgH?5zplmps#?m$~tvLOxSkqvbGUBvPtHc|7oS|uIBu4@Q`=UlA=fOH>(8DpY+w|mI!M1>w$CKx{`{FU zx~|i$LMs*f=+4+0$_-eqG_X#b$JFuCC^T}7n01(FJUWURSHfhZQeJnGxf|r+x~f5R zc@OMJdyaSBaNJM69E@;CR$+LQ&V(xFT60Z+XZ)i`ljri1XfzHeC`<~;Qm8Ue+o~HU zQM&_sUzg^Jigz`_euS>_AE_ih{LBRwZ}C8Gzw}7jcwAf4Uk1uexQppL z?46lUZX>1J$S;rZ zsK-e9VppZK*+eu;aGy=Q5~6ThyFv_UX3>rcm~@ zNMv_=cev}oraA`PV%V>dj)C_)#WQ0G}!$VG~>&m z=oaqB_mO-7(k%u9y{!R|;0Zs9wU~RI^z$}FnAW%-^2ht>Ie8-c3{R#Hp;PinUGW8T za*FGI{DB3dN}*$fIq2TNNuf=WX1hCvJ-)^c&>cs~UpdaFJP6urckTxuQ|W|_sq=fK z;Vo4lDbI&R_XXH%%Y39g=BQrC;G8)|56cLdMz8d;8 zP?7Zly<$e0_)b6oO@oEhNb(nk((m^a$=Hn?mb0ND*T?b$to0LVX~UhPuPb%$(@ zq~@`kHqLxP{S`0uiS)v}zcS;Sf4r%3{O9cW-|_PA^2`4kFqI_~kpz%GI3d&MBg5sD zHhyjZ1xB_~wfaM2?)m5Ef@erzX(pttFbSc2Aa%JN?*Wjj>bc(vsCDHC&3}PPIk;;2 z*o=E@-k;ugzX0r*^TwVgix=Dreh}_qnIow9#L{`IILCMH0#KK zNjAsmg`O%nV2l=NCHe68^%>pE4emKzaG^D)CfB1&0=4hQ^p#hyTa zInzqM3{z_-IVz;%ap}bN;L&bcuDJp1a!D=$%dz|!N`|pxJH4wBAAY+M0M^Ob$B<1C zPCo4Yp+gswQUug{2cxTW!@W=z5v~&H4JL3OJkJ8@10Ad=T%(L*U?Y!sEC~7^^(U|l z-GmX_;8F?jU$Xwc^?0gxUFY*sE?x()Lt`gN2`kmr?j(lu+#!8_kYzkq4I~U8m8ooa z_ibwR@enIv-Nv=7pzF^+IGQ17mi6Bewj(`Uh+ZsyWQhhgk&JPn=S8S_Bdnu~doLn3 z-R8g`@Cnb-?kF*>bsIOb`~iM}(A3GLX{)W=M94RJ#Pgfi{qnrv5SHBSk$hjJ>#TpI zDgTF##=osp(aGR{HBqI%p()o}?aor0A+7v^E&P_u;yHi9!-vx(sPo-tmne=#l-l6a#$w9Sh{xU>j zL2^RUB-KKaf#yhTY%8CPXB|g1mT!(77DNC;wb~FG90F6ZVsAAo2)AIxRI9rc@3-2} z8{t?)?T=Tq%DMUO(!qVv8bX<@Z$h@tZJ=hSQB}5%HD$B4ycKq{I8`WiPREttNyD@F znXSKWCT2H$=yin$t@ ze|{y}#T(NWzE>GjITY(dD^#XKkKQ4VkB+*=hzxEvjdZJdD6`ojk+u#hf4p=Fh&26Hpc^d2W?sWwNqm zba2JR!1zjGO!R~!Nj@<2$wjAni@+#!>U4RUa7l9Pe`(C7Pg)E=u)4X9r&Vu3&I~=LIx>vYaTt z;zia!mYV(n?DF5*_`j-R+_q#tJ@QZwlgas6PnbvOr67@On^Xw>3Np6quC}z$q-92Y zaZq_Mh;~XfLvTFNa*mMvqT-P&+_m1kr*IkO8}<%?B(d6w^q}wW5ck zskR)MW(o8eD^-lmJyus@C1^ehLDOlQ9UlDg_Dlu9RYuqmA~INjE73Alg7` zY69eJr!alOOW0|x1;xX{QMY1{yw8{MmKZ4tY{rqI$QCgv_Vw}}@e1KXl8WVz3k#g@ zdBB1l{Kqyu-ol}nMy%-*}*NeacU70`^WyI{7P_=5S5T!?K5T0 z_Evgz#6X_EhQCWX@dc>(@&DQ}W~ULMh?9J+W#Za*aCSC!e!pK%x&C%HW3&-lM=F#Q zmlzixmmC)t7bP&%2^7ram#Pdus1B-_nkEY3U{|@Pg`d3F8hmd@T#GpDL_HAKPZHdj zPR(VlIp5JtI318YO7W4lv!*Ox4A&subuQ;dfM?qp#grN>kaOF+RI}D)^=^F zyIO%=XBM<~gWx{s^*vPyuf5OmG?;vq=6GkWo1Vrhix(}rVjd!mz=GIq$l2b-@ zlOlt4`-bbQRx^u*>UNb$i~v@2^jzJGm)46B@in!+vj)75dl}qk(dY&w{kZi{G z`%QwbXmZfh2E#a-HC+G01|z&5*lD*FrfpPXIQI4PjKPT1Egq(Yl#V#b$6jHNROl9@ zQL?F#H(gK3GDC+@&VgorhB44|5)?_I0XM%X9B`+VyQy3c91}0;BS_r@eh91%gC-#= z0gP)5+2rc$BczB&(3LiIVepDWxZA5Ly{L$X;F12}cY`Zyh0V$M7VV*+cS(r*0#=4Q zwRAQYuVk2G#~FFyAuI^|HJ$62)(6D|4$=j6eNWIPY~rK1#KxH0c0M-qcd~!Z2-^1On~(a1P`hJx?dhBYIjV> zK5J12L$n|tCHV=k`w93%L4JoZo;c5Qt;-dU!&I6_+uPH}9aL_l6ZAN$3Jo%KWf6Rc zRRDn*QLqvi$AnRBpfng&s$nW+ax?~Nc+pxBI>Z)&-}H2(ieC>`rzPVBvPIrgTPx~S z(1E#a|K?ArE!Zya5c8Pj5Otv+N^(I0i|bKJVLT}04b$*7f!RhQi%R%%ubasW$N7r{ zh1_K{0bZz2O;j;Tg}P>vIGfC+ChBOxmJiO%pgc!Xiby;{sO43K!?C0X_Jm!s|mwnHGxS=}}MxM^6cOz-?yi>TFQV3)CZcW`bu9FU)e`IGJi?$7H% ztk@-9bIvCbBASI&Vs1aw!`;fU`wSh|rkD_?mWYb4%M4PBQb}lMNa64(wBVaVAH-Gh zQ3{xn+y5Mq1&^Q{KFmkDV)g&_E4=qTB~x=TyolZNJO~Ja`|He46fBLszPwQKKYF46 z;!^m(c%i>b>rJZX4oJ$#pENS<>$S+CG_iLl{`~8WB%N`nGYU|`YYOW?%`&}K^{~*R z)+?LHln~**;WqMh+}LsQ*dg-#X7WndCW#2caNaiouL33uaM7Ou`)wIA6jrVSy=e!T zZ|uh|(;UAuJAUiD9naH!n;6UhT7sqrNd=<*F7cfQ$QjC2`d0!_G=!`2FUap?Zrp?W zX1LQg>=XOyxL>s4*~1pxI>&a2!&uy}cl40I8R`?`C`HRL)O$xXME!9O4C}xRF)%_f zMKQV2r`z}IBI6DtncZ#S&d}A51-|3z9SFcd$&ZV7fg{z<1%ydkA*wLcr^m72KGzyC zced&V`ok0HBW#97tc-<@iTlUtSQ-?{3>1Whtwe>0TnKc8cAX&C!Ql|%iY0`S-Nc6e zS;61K$3?IUjYG=~b46JH>P18m>WEf^s@94OFcf=W%U~O)i0f;gs}x8Gg^8DwK9UEQ zouwO%Xeh<))uU{cqa%j+&(*(L*~egq&I;!aGAhw%Oy>CqI}u^VA~8aOEo5);tFa~e 
zmZ_mir26T}*+k5f-nJ19BGnlwElT$_AY8xt%r%hq#8E94&u}IN1nXR(ek!m2 z0YcQAoYb_0)9TCx@q@=jf7KFUEv;ww0__7%{_@YZDn`6llRGzo;@U}Y*1D8vW!4n) z@d1RT^sTY8Z(&3fAi0(rTajf^qnE%XZGNPptY&*oYreHOI-o)&h8%lfv3E$%LfgGD z*ZWU#`w>m=5LU?WQbqb)YNQK=XvGE!5syuAfWF{YBjDLuUe}?%ibO6Vo4IeyJZV+F zHm{5#pf`ar5b&eH4|usZuZ(B`;S46pIau1qZ1^ZvJ??D2he%1Zv$CNx@<r;- z*3tfX$6DwrFVAwIL7h)Y)NWdXfGdD}?qNEqU!O$3PLwdu6k)CVM$g!GM-gYxY*lki zEYX?+g9yhDc|xYVDXIUt)L=6e5?gd$tvy_!>!BqkfXn=36eI*-&YhQ0EGa>y-hM!> zN-}!nOfsie`hdbpBGW>w9{yl%uLUa=46H7)z019*m4r#!q12$WU54�qiDJg|S?L zj~+oG&!J){oM0!?aOTX=V>KQw2GOK;&<7u8*B=pPPH_(&$;hi1WtZxjKxY$-QS!uo zFdQth;IYqNfNk=IAV1GEmd5@8`6|cD}Oi_M9L!gouv>t(TG#c#r0Ob zlEknMp*<17@!O_?ovgD7ZO9x~m$;ppV+K=eafCW%bEc39{oQ>7Uk3;EgpSz}HzgHR zmp;cLp_)5qjHiTENAdN7=s;s&KRS5B+z-;DS#38$?N2x4IuC}X@Rh}tBghb!^^NiB z;Q_HashY{tEh(*UVlL;X`SNEpArTtB)7LmmzHJ}lH%%|#wKs&tFI{~i;>Tw3{j69s zA`kfX`U)O=81|u8Z{W47e&x9&_MtIU*J6NvP-NwY0Pl~600`XpZ<{Tf?1JeX_+05e zNThP_?DLkLzN1Nnt15?thrHB}ZKd)mhfi%Ak;kv&URU9g*QMSQ*uVUFrE~qXFn8YU zuD>1q?!Dy9aCibLh7qBVEMQ-YrVo?pN=j{j*oEb+^Uvo0C2yY(XLz#tQ+ztOjNMP2 zJa|)wXG)vZv*;Jq#=Ln9%==dbfDH|RR5OTtqgix0Q4-RpQXfu?+K}S#F@uBKN@jK?Akr`yQ z0dKbmOLZRVb{@jEBFp z--9{dqgCDo)E~)SZqP18jcUEmE%|&;w=$*3KOqvj+E{b;oYpKp+3YXj@Zdo5>#$wBGPMy6) zJ9$p>$zzHSpSS^jmye#_EL@GZ9;Q3a%=E~<&rIoR`+R@B1I%7IvINDPf>sQXKeU}`EBRm{vZ9@E5nZ%&CP7M(t6Vsh zf>r_{ODH<6#BFtS%#9y1o6|-=EDrSZ&*A)s!=h&`T<%zv?+u*YeKa^3bFZQr+Xgzr zNE=IaL6y;r#wfyuF9vo+><$vWX-4HY3oA_ZECtra%UZ+b862PmAy>&3i}j!tr*j;! z(pKc?Qn=qM6LZ4DGo*M_=##j!Ph77&sxGZ|4_@c6gPH!Ue)|M=vt?1Q#bL5wjvR{~ z(eRk&Y@!J>o~IY-WtBNf@{8kA*|a}Kj+{=(s5VsEU^DpY_kf=HYokCt48EH)^w9TY z>n85cxCda+syWqdtkFQ5Y?3ddbJ}|n-m-KYDh+r^Ws6DP4+7{UhI5N-X@NzX#)JxH z;hivdC1uc~{R2x>aWdLmG9FY#+5n6IWoB%72Cd#5;l-)t3K&M5W_&oS&*5BlcVyIP zLnfPndB*|i-J^~te5)MFFFd#YvU#Pd*XGHkSGF-85F6QEGasn?XhZIVf}ivcN0uwj zlg4aluPQ2aIB|!#Dwx%PaUcZ&F4&C>|IyD)J`am$D^~aZS!~0ed9%P|)wHdRy5E0p zNz$t}RrW_BwL0cmdp-In)@7{KHjfwB&CAlyh-=J1CO}XK-1eXO5aQ7$=7Xw_{uJ;I zvTqrT$vs=Z0yp|fzkVy3tAXF-{Mx~FA7fWZpGC0UQqaY7B z(s+%1a|nawn-8>MtIWGOygvIkdZ|7Rpl;spw$*sG;{4g)3cy#+R8r5CN?%dNe$4wA zjNFHKeM)7*c zAAeymN|VPbaxf!i&+1zq3-Kq(<&8}_DTa!FGG!?u z{oeX%_-}F=>NiKbov#WXHpD+U5Ay%uJcLXEh87M^R^k9l+kbnFD3x^&q-BK9D(jBq zWmlTo09%M35~0+VfxxW7ti;W&YQ3x!qU_LS;~JdFs0T^;Ns-zRRtjsRR&$9u@%-te zd1$5?tkGshNQ(38%ig;88^_(h%X|dpF(jG+@PA(KxJ*orCeoPhvCKX`a^zCJT@2_V z6Qgui?wmvEDBiUD|3;pmXf4}O1dV|DAl^@p%o%%5i_95+4vIWyv>tgbiqsu_Zi@T@ zi4`5P0eBJ{stjElc`AVOWtRz2<->?|{jBz!4$>>zc7m+;yW%&-K_fTm_3{ z#5Qq_-X-q;Vp(TGhuMd*1KSm0`azFvgl>dxfNm`AI_qx;Lx$cxK)*XE$~zm55Nv`m z)aeSZTRBYX=|?bRD>=xf$7qCM8B7rjP1zlGUFHhBtzw6>tMC@ z{pvPR|BZh*Nv5(LIGXbNcM23Aqh}_a_#LYV6VisDPVtL`T7nr~$Dve4L9>3zV9|Sy zdC4b9Td*}baa+(eIoQOM(7+}B1D$ohzJb7z$9nm~Kg4viPo;sz8s;wm77lAjVT*Gv zAV66fWD-xr0E0g9NpSj+OvO>iMB@oyTia#S+)LSGmnmk6yJ09Csm^=Rl7o`7ljbMD z+wnaz^mtiV6Twcw99?1RX({$Ihn5S7MX<{!uKmi!I>ME}Q~wKBT49*CKw|HVJXkJKE$(rD+5lcY1&(r(})t8 z@rmn{ozn`-jkw+q!vdvZamj({wV+XfxT)}sAI>rh5k`oDg8Q*%VR3y2ts^*WL-WM9 zn^<%iRZt;O)GY%ye%9@Vjh%;7|8R7;PEx=!=o0Qh6z}0b14r?u0vy3#Q;16rUNqha!{W1&Q-&M zETg?3|F~kQObLN>_r6lVF)WqrFw3ikqUfEK1|)-*qkm@VQeqq{l9iFknpouoM}-fE zDP*}>OwmMVZ3CExdD&(ZB5vtbN~wtcd_|xDuIFMp;Qn#E?_@S$-Y#x31EWGi<5KSr zB?=)0k;%`&(f%c6voD@T38Kv$DOA#+q#FCu%}qiJ;c+*cvQ4}P=-+&texY3|T=5t* z1Bz5-72)}2Nt^+}S%k0DA@YL^kgyi$qAb!_tSEH~q{GAr!yIY&Q&y7)@dN=-@rVQt z=Ea^yG{Yp^YU80&@CC4L`5=mLKZ8fB#>NGxY>tG}pAV-XGb9cg6*tHNrELinX?68A zv$WA$s8%bcOibE3JwcLS3>2S@jP<+fVqM zcwo_fxW3@UJ7+xeC8)ri-3T2J?4t13?~p=^M~jrc5-xppYwDbEt)N}oL#sI~X$?wv z^P5l*Ji&WqbF_qql}od_IoGs$hh$Z(uI*e{JG4t8PGCB!iL?>H9&uatdAPSvEuGQo zgy1?nqX~gj={rM>y>R6MdB#J+A%XxVF>{sAheZ}Ednp-7o6sxHDCh6lb7BaahAGIW 
zug9Mc9^otfEv_fkdLg*gn!i3kFEqngFP=Ky9Sz@<15sy7@2z;Jxt(2*XZIZtpS3#^ zT*aJu4s38^HeOQgiFTFy5||6Vd6?ouB0cR=TnHh*F$j0abHn2Np|^!Mu?xL(MW@q; zuZbJfbOl>B^#SgGh9FAqZ+He7zQr(j2A35_%zC&}Q<&mW9s~_qB8o7b3AlH4b$tW+ z`iCD?8C}bKpJ)Ipn-!rL_l4^;*zuw$&k(KUJTzdc zm{rm3UuJ#mlU|URx1^@@(sTO?rXgY0?xJ+05}&*}l_1SYrbM%6S^dd7G!I-iyWxCt z5(x!gSmR1{d~(G0Gw!XHd^@>qrUASUY>l*is_tY zUx0{nGLAI#BJ<}Qm`WuQ9@&j(@cFS_+;_@AZaIu+`HvhXx;HgH{iUw^XeRGadP$Za zxSe6Se%t&&9BdEBaADcqMxz@?qqT@aqxt!}X7S*r!!&LGnM^!|0@5KgA!aw%(VD)nlv-Jo65gRt16QJX<~$$8jOvQ1`4LHHd_PCKLuj?{}hOAtc}e~oW6DtZ0v~z9KS|K4V)bR zXRh8Vc3bkRbUD;Bx;SY?#d3+~h@@RApt&ppFRE&Q7Tm&6vCmq6wZu;JpaLo}BLX)g zBx176FOR{_bj-E=J~i?3^5+%IHd+V^18Z9~k5sKDXUXNVWV)8ql z1KolAPE%97yzOXg*%BvA(wRNYtb;0J{A+rz`2bJvPs;&$T`*BQoQj46sD5Wf(R@H~ z_fNPDxKU+|#Syb%QK!KyX>4n`hc-y3kdR;nGWb{La{86T#^>0BJ{X53G@xf>w!D zgv&5G%}?Q{tRD}C;cZHu!U$X=bn1MuZkn}F^;d@b+wPf9MH*Q$v1$5QN=XmF-(SY5 zW|s)v#g_t5PUayiMINB@P`>DO`!#+3szf^DLApzSRU$?Iu@Xu9-<*_)CBO<`?fCa~ zOGbA5$9Dnv%3BT8UNUiPTl$Sb5q3!MJ>HHXOm!y4Gz8vpiM<_Q&%_g_xh$pja;hYE zpKQ8sLJPo3zzc%($mZmEBeB;{%`ioEb7L~%+38tErZQ1m6$6^PkvQ0cBJ|9tlH)~u zB6HTliSblgST=?nsu*U*wAoNuH_DEQb%rOH8Mbz)w*vM~ZFbj6W1k6J4NBRC&gcYU zS@rsjN|E&VOCNT*D?a!>pIuae1gqa;fT{}v9gly0{A*2wsDMeX^Xp$9eLeqqY=`>4 zk&BGJq5Z$v3zf~jrZ~PG|}0`>}(&^$P!6U$M8+p9AzBN{34A zXRJzXsh)aoxc{Q@^DNb*FLQvm>+HT5u=U$4d6(!$9O;o*LL|jpW3Fy`$DHu{&)In( zygnLJ(Hv%Zlxce6Ng@;^@$%FGFBC=BJOj*rO5+>;K`LS;BO^;lI^{}z;X%%;vK4;+ zUOd()+puG5rU_dXM46ubYd3{sQHC~S&2)Kez>R1=77TEl;&yWLc3374W*54_Noi8+ zAiV;S4w(_5y`3&v3+Ev+RA2r>So&$Pvh*F}^krz_lQ-SzN_w_*ny3I3Cu)mKh0k2^ zg4Hl82*aZfe1yWNavW3DyBkTgfcRK1=IvTqe;g-AiAb;Z&A~Rbxm2gla9_WZwP#$) zw46mBYm`~AagULyc$!-;JU1*mH5xt0Tsi@xteK|Txu+%?qdjUmfj1=1NsSH@4i%Y> z8REqFz1tcDT}qIEEK|!RY&DlbitAlI{{|Vj+^C2?Q>l?0C`!7qYgp%&dDwkBZMarW zW4NpY`qM;6Ci!~08GU2UvR_`qQ_q-fk;X|O;I^x=S?aXc+%fw4Di#FBYH>wh;T$8% zbiU|pTCC}1g91fdwT1T*J>Mxp+U1Th#c!&o@}6g| zmQ(%jX3LQPM}5#uH4!`>NQrz8NjSmS_Y$#2dBUVK&`r+6C6Z^}YSrCzQ;_D7=#CKj z*MHGNR&mW5aD3hFkpFRh<3E*#g#PbApPVS zr(Qo4_(f)eAqv^#5B9doZ&*0wfiVeE9A*cFqke=+4hsADiB#C+N8N*J!xAWDESZ%q zgy2}2u|LO04`mPTtZw;Gqp~Id0E9*{382!MZPx3c;%o ztp}PAt%G&@Xj&!vs#d6XS!Jz{*SKt>0ykGE_3@e|Tp>D$xg$dZ#af@fDM8BZb3#u0 zOSt}6C*h89hNQFgMXM9>?JqK~mLgL4)*>xNenK97ZhEjm3IYZWPfad-w=HI7xvA0wbm(u;Q{uOnTO$<$`SqxpU> z#;77^qa_ZhdYZ@nS-W=iE#i(WMrXcmQ4*<$QD$}Iopc5%~y@>S!#VB%wh(!n9hP(Vpajv0}i7 zOk==yFrPZNxq|wnz?~L-UNms1^HG~;Qpu#)!igIDaJPf>jP?7~C?7D;7%6kLj8~)Y z-6#tu%a5*m#Wv)`DpR%7Je8=S(@a4g3K$Q#= zOVb1VcL$FVk*d;Dp5=j|l-UgMRuTU;(Fbz!u2L)CXKG+a{O8BHU*Y6y_P0Oa)K&v=~h% zNPr6JAA?$_zYn{=z!#lksK=f_P#=C|6XHw$o@4ek?Ef~%8@ERlC&IZToW|_ z8Tl)MFp>(2BRr{|M9gRA;$5c19fyftG^GT(uYrov-sP208~A}1 zh<&bhkZnUa@7(hnq91!?oq;)a=siFEeTpPMM3vvN8hv)YAm{6EQuGG5{gF&S=splU z*{Ldf@ma_xw~Pa(@{(Grl6>w`bx?&FSxRvficyZLD8--nn$pM?4TxGsdLJFNy7A+V znM|2@s1n%F$qS*?E`C4cy+asnC58;O(gqSI#tX~oT;BBjl^y$IgfydmbtB|p|0xS( z{I9b>1%RQAz0u!MCrN2t7D*midtR_4#ZRz}LWf`E6!r{79$o~oU#M3&6l7a=y$&WB zgHc-I8FD`${W&-6HJ3M4+<-*#tF$IRG1|`jWk0s zq_i$aSGAKGv_f$qgvb+1*D-r7y?bL^^-AVFM=wf z2lc762dpa01LEEmUe3}38D|SObo|1tWTLHN| zx;qbFON)V$M3W7rG0DlK{ZujX3)iNG(&pYEiW(vNY^=1GN=0DVFVJiqRZ}6|zuUO# zh^dR2idBVNUf^w)wsij|oG0%*4OTrSX#H>+dWKF7BZYOx@}G|t9iBbskt8TPtlmNB zk;y#@e3!{bEw*Kc)sy}KJ3O-xw~WIhuoLikuFxVE1g$LrO<2m9@3>8veW%i}aaIP% zg{B^D%fF;l+@slba`9^DeV0!LoHDIzv-48SHRiVwUYc_>Ql5inI$BqJi^=iDw?#|S zM?Zv3;HAJ9^Q+riW^SU*-X)mlhA_>#Iv{9FoM(84-|6SMfYiot6D?ijuMv8|9rp{y z85VZX30xvHIq!jQ4__={XAx}9$jUz=(Yxs%DW_hUOWm>FB~Mt;_JV@DW~>Blb`s&3 z&w`dr#Wp!e|1PPMSTx+`f87nsK>x`C{uefmfA!=3A4!*-fjPj?Q5fK0Xm4iwb+!N7 z45C!mRh0n9pO)dS3LA(Q4W6Dx!7U?}kGY;ktfJm0(xi 
z;-gi7bQ0=S`GS3~fiy*&q5fJ)BQpoJeC!KC0a}7Az3q##0&0d{*$qO&0O=khxSHzs2y_O*Y-~uXla#C15=gHEDTWXakCH2poD03qkk{=8 zj2La$6Ua78x=Z{d?uvDZxFysQsMzNWRwKn8K?`|pm=g#$L$J%_DEW-dMWQuC3#qHe z-%K9&K|FgBI}eqdn@u;#{!4co7kb zv69OJEK$cRyiF*kd>22%glvLA8&;X^R8nb)NPcT%eC6b)@Q>n4M%$V=n;g)$cWDPD z4s3&uGgOi5n1@?$ef0zYP7**dK8cS`&M$~hwMoC6;}V(XZsxurIa$Uk3%~|lg`MV; z74*laSnKtD#%0n~f+NVV(N>>er&(DU;Ythw4E8v?dd9B)uaDJ>#z zC79vRPvtOpLD>dO6%E4x9?3?nus}*ommXGvmb*U_9ASGl@3VW@1RK%Eqw(=}Z053#62|#K|u1rS${y z@%P&+Y+MEA`4a6}!`WK;R1!yG#(U5PN>&Y{u9h6QwAzkWK$SAb6meU5v-13cp4x`K z5HsV5Q^jo!+!l&LGVNOopI1B>1Cuc0sM3rAMKO@A@y(CueG!eE1ZY#HR!#W~}?ic@=~<#uKHCb5V| z(_2y0Q_noq`(XyAeNBk~Wa!^xLglm}3rJ8s=WXxp{y9n+t|e;qVI^Z~FObYjrk*^Q zx-~<~bpJ6!pdhKS`=w}HT3`o7j}5n+m;v$(0lB0WK!Yz&kkq&W2{B3W!v!n$XfJ}a zS-nQUoBGYsi5rDB9e5=Ag`t=~IH(n+lEKsLEUZy*s~It9KRsgGFp zcQ|nJsLu!uL%T-fUlSZd#FQNSxwj@2K06qM4@}v8f~Xd|x#kBmP!mPU-d!DhZD z7$fV@WhJNHJl1H{-HqY?e2wbJF*v5yX;FPnXqdSJQ6ImubyE%$^Yk*U@(hIPgOF5H z6TLWqSzv!hO6BvB6dEyAk3@7Eg5jVVO2c@WuBo82wIvnfjC zHGY`Wd=YE2_QPwK;W~eFFF%5`!+Vp3Z>XudH(}k+ulR!ek69M!f01SV9a4(a zpgolillYD^hP%?V5E=35M~L)%iJ2t6As9i3LqOJi@%8NLDU&8-{LR zQG&JEvS?M>wr$(CZQC|0ZQHh;m9}l$wsEt2_rB*m+`q?tSnq4h&k->ra`!4IY;e~% zm7UIQ+)4BrFWR(Io&&Bu)~T#(Se7+gI8Xn5HaVIy!h6y5;hlQky#JhjopROL zW_7^>L*aQ4(IgyL7pRxX1m(wY0bri||6>Xl~-F9H4yyU$WQiZkPk zw9`jDoDeDng1vt~*kQMn?RYehD*e8d?S3a{98n*rV|Uc(aqlPRDffcBH26o|Xae`& zH2cG2mFXq+%1@}$`4u2_Wb@kciT7f=otM`NWrgT#rz_zF^27lbXU~v(3wq$CP3D8Ot%xh{) zreB&PHf{n#E&{|e8KiHb7Q$3vE)RN>5HTG8+TKDd#!4a>9Yg(Hm>1Za65K9rHml~XO!DUjIH~Hv@xYIo_Yd_h zUXJfR2d4sK6F!rFEgQFd4&pLA)}a807hxr3!1M z0GSn7@M5u%X#|GmpgG?Q7PP}jPZgwerHHSSjD*}i6X}Ov)IbJXYXLmgWSkLIE(0pQ z@8Z~^^KxaGxtjK}l&RXGro8BIfn-NBA(}A%o}nVqY$8-p>a-~hzug%k zkD@r8LcS*J+<*~m5*oYT&X`Ua@#hzxKvbME;L;0c2+Mwco~z&!gf103lKC?r@!78~ z6|skGdg|KqGM@sl=3lE)?IV6-Y`d_*qltYgO;HSaB$pE6&}CT`sg0MkJ4W<3dZKDt77@gNTP(;iz=T_`0U