@inproceedings{VenVer04-ICLP04-IC,
  author    = {Vennekens, J. and Verbaeten, S. and Bruynooghe, M.},
  title     = {Logic Programs with Annotated Disjunctions},
  booktitle = {International Conference on Logic Programming},
  year      = {2004},
  series    = {LNCS},
  volume    = {3131},
  publisher = {Springer},
  pages     = {195--209}
}

@inproceedings{DBLP:conf/ijcai/RaedtKT07,
  author    = {De Raedt, L. and Kimmig, A. and Toivonen, H.},
  title     = {{ProbLog}: A Probabilistic {P}rolog and Its Application in Link Discovery},
  booktitle = {International Joint Conference on Artificial Intelligence},
  year      = {2007},
  pages     = {2462--2467},
  url       = {http://www.ijcai.org/papers07/Papers/IJCAI07-396.pdf}
}

@article{Nitti2016,
  author   = {Nitti, Davide and De Laet, Tinne and De Raedt, Luc},
  title    = {Probabilistic Logic Programming for Hybrid Relational Domains},
  journal  = {Machine Learning},
  year     = {2016},
  volume   = {103},
  number   = {3},
  pages    = {407--449},
  issn     = {1573-0565},
  doi      = {10.1007/s10994-016-5558-8},
  url      = {http://dx.doi.org/10.1007/s10994-016-5558-8},
  abstract = {We introduce a probabilistic language and an efficient inference algorithm based on distributional clauses for static and dynamic inference in hybrid relational domains. Static inference is based on sampling, where the samples represent (partial) worlds (with discrete and continuous variables). Furthermore, we use backward reasoning to determine which facts should be included in the partial worlds. For filtering in dynamic models we combine the static inference algorithm with particle filters and guarantee that the previous partial samples can be safely forgotten, a condition that does not hold in most logical filtering frameworks. Experiments show that the proposed framework can outperform classic sampling methods for static and dynamic inference and that it is promising for robotics and vision applications. In addition, it provides the correct results in domains in which most probabilistic programming languages fail.}
}

@article{DBLP:journals/ai/Poole97,
  author  = {David Poole},
  title   = {The Independent Choice Logic for Modelling Multiple Agents Under Uncertainty},
  journal = {Artificial Intelligence},
  volume  = {94},
  number  = {1-2},
  year    = {1997},
  pages   = {7--56},
  url     = {http://dx.doi.org/10.1016/S0004-3702(97)00027-1}
}

@article{DBLP:journals/jair/SatoK01,
  author  = {Taisuke Sato and Yoshitaka Kameya},
  title   = {Parameter Learning of Logic Programs for Symbolic-Statistical Modeling},
  journal = {Journal of Artificial Intelligence Research},
  volume  = {15},
  year    = {2001},
  pages   = {391--454},
  url     = {http://www.cs.washington.edu/research/jair/abstracts/sato01a.html}
}

@article{TLP:8688161,
  author  = {Islam, Muhammad Asiful and Ramakrishnan, C. R. and Ramakrishnan, I. V.},
  title   = {Inference in Probabilistic Logic Programs with Continuous Random Variables},
  journal = {Theory and Practice of Logic Programming},
  volume  = {12},
  number  = {4-5},
  month   = {July},
  year    = {2012},
  issn    = {1475-3081},
  pages   = {505--523},
  doi     = {10.1017/S1471068412000154}
}

@inproceedings{RigSwi10-ICLP10-IC,
  author    = {Fabrizio Riguzzi and Terrance Swift},
  title     = {Tabling and Answer Subsumption for Reasoning on Logic Programs with Annotated Disjunctions},
  booktitle = {Technical Communications of the International Conference on Logic Programming},
  volume    = {7},
  year      = {2010},
  publisher = {Schloss Dagstuhl--Leibniz-Zentrum fuer Informatik},
  series    = {Leibniz International Proceedings in Informatics (LIPIcs)},
  isbn      = {978-3-939897-17-0},
  issn      = {1868-8969},
  pages     = {162--171},
  doi       = {10.4230/LIPIcs.ICLP.2010.162}
}

@article{Rig13-FI-IJ,
  author    = {Fabrizio Riguzzi},
  title     = {{MCINTYRE}: A {Monte Carlo} System for Probabilistic Logic Programming},
  journal   = {Fundamenta Informaticae},
  year      = {2013},
  publisher = {{IOS} Press},
  copyright = {IOS Press},
  volume    = {124},
  number    = {4},
  pages     = {521--541},
  doi       = {10.3233/FI-2013-847},
  url       = {http://ds.ing.unife.it/~friguzzi/Papers/Rig13-FI-IJ.pdf},
  keywords  = {Probabilistic Logic Programming, Monte Carlo Methods, Logic Programs with Annotated Disjunctions, ProbLog},
  abstract  = {Probabilistic Logic Programming is receiving increasing attention for its ability to model domains with complex and uncertain relations among entities. In this paper we concentrate on the problem of approximate inference in probabilistic logic programming languages based on the distribution semantics. A successful approximate approach is based on Monte Carlo sampling, which consists in verifying the truth of the query in a normal program sampled from the probabilistic program. The ProbLog system includes such an algorithm and so does the cplint suite. In this paper we propose an approach for Monte Carlo inference that is based on a program transformation that translates a probabilistic program into a normal program to which the query can be posed. The current sample is stored in the internal database of the Yap Prolog engine. The resulting system, called MCINTYRE for Monte Carlo INference wiTh Yap REcord, is evaluated on various problems: biological networks, artificial datasets and a hidden Markov model. MCINTYRE is compared with the Monte Carlo algorithms of ProbLog and with the exact inference of the PITA system. The results show that MCINTYRE is faster than the other Monte Carlo systems.}
}

@article{von195113,
  author  = {von Neumann, John},
  title   = {Various Techniques Used in Connection with Random Digits},
  journal = {National Bureau of Standards Applied Mathematics Series},
  volume  = {12},
  year    = {1951},
  pages   = {36--38}
}

@article{nampally2014adaptive,
  author  = {Nampally, Arun and Ramakrishnan, C. R.},
  title   = {Adaptive {MCMC}-Based Inference in Probabilistic Logic Programs},
  journal = {arXiv preprint arXiv:1403.6036},
  year    = {2014},
  url     = {http://arxiv.org/pdf/1403.6036.pdf}
}

@inproceedings{fung1990weighing,
  author       = {Fung, Robert M. and Chang, Kuo-Chu},
  title        = {Weighing and Integrating Evidence for Stochastic Simulation in {Bayesian} Networks},
  booktitle    = {Fifth Annual Conference on Uncertainty in Artificial Intelligence},
  pages        = {209--220},
  year         = {1990},
  organization = {North-Holland Publishing Co.}
}

@book{Pea00-book,
  author    = {Pearl, J.},
  title     = {Causality},
  publisher = {Cambridge University Press},
  year      = {2000}
}

@article{BelRig13-TPLP-IJ,
  author    = {Elena Bellodi and Fabrizio Riguzzi},
  title     = {Structure Learning of Probabilistic Logic Programs by Searching the Clause Space},
  journal   = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  year      = {2015},
  volume    = {15},
  number    = {2},
  pages     = {169--212},
  doi       = {10.1017/S1471068413000689},
  url       = {http://arxiv.org/abs/1309.2080},
  pdf       = {http://journals.cambridge.org/abstract_S1471068413000689},
  keywords  = {probabilistic inductive logic programming, statistical relational learning, structure learning, distribution semantics, logic programs with annotated disjunction, CP-logic},
  abstract  = {Learning probabilistic logic programming languages is receiving increasing attention, and systems are available for learning the parameters (PRISM, LeProbLog, LFI-ProbLog and EMBLEM) or both structure and parameters (SEM-CP-logic and SLIPCASE) of these languages. In this paper we present the algorithm SLIPCOVER for "Structure LearnIng of Probabilistic logic programs by searChing OVER the clause space." It performs a beam search in the space of probabilistic clauses and a greedy search in the space of theories using the log likelihood of the data as the guiding heuristics. To estimate the log likelihood, SLIPCOVER performs Expectation Maximization with EMBLEM. The algorithm has been tested on five real world datasets and compared with SLIPCASE, SEM-CP-logic, Aleph and two algorithms for learning Markov Logic Networks (Learning using Structural Motifs (LSM) and ALEPH++ExactL1). SLIPCOVER achieves higher areas under the precision-recall and receiver operating characteristic curves in most cases.}
}

@article{DiMBelRig15-ML-IJ,
  author    = {Di Mauro, Nicola and Elena Bellodi and Fabrizio Riguzzi},
  title     = {Bandit-Based {Monte-Carlo} Structure Learning of Probabilistic Logic Programs},
  journal   = {Machine Learning},
  publisher = {Springer International Publishing},
  copyright = {Springer International Publishing},
  year      = {2015},
  volume    = {100},
  number    = {1},
  pages     = {127--156},
  month     = {July},
  doi       = {10.1007/s10994-015-5510-3},
  url       = {http://ds.ing.unife.it/~friguzzi/Papers/DiMBelRig-ML15.pdf},
  keywords  = {probabilistic inductive logic programming, statistical relational learning, structure learning, distribution semantics, logic programs with annotated disjunction},
  abstract  = {Probabilistic Logic Programming can be used to model domains with complex and uncertain relationships among entities. While the problem of learning the parameters of such programs has been considered by various authors, the problem of learning the structure is yet to be explored in depth. In this work we present an approximate search method based on a one-player game approach, called LEMUR. It sees the problem of learning the structure of a probabilistic logic program as a multi-armed bandit problem, relying on the Monte-Carlo tree search UCT algorithm, which combines the precision of tree search with the generality of random sampling. LEMUR works by modifying the UCT algorithm in a fashion similar to FUSE, which considers a finite unknown horizon and deals with the problem of having a huge branching factor. The proposed system has been tested on various real-world datasets and has shown good performance with respect to other state-of-the-art statistical relational learning approaches in terms of classification abilities.}
}

@inproceedings{RaeLae95-ALT95,
  author    = {De Raedt, L. and Van Laer, W.},
  title     = {Inductive Constraint Logic},
  booktitle = {Proceedings of the 6th Conference on Algorithmic Learning Theory (ALT 1995)},
  address   = {Fukuoka, Japan},
  series    = {LNAI},
  volume    = {997},
  publisher = {Springer},
  year      = {1995},
  pages     = {80--94}
}

@article{DBLP:journals/ai/Cohen95,
  author  = {William W. Cohen},
  title   = {Pac-Learning Non-Recursive {Prolog} Clauses},
  journal = {Artificial Intelligence},
  volume  = {79},
  number  = {1},
  year    = {1995},
  pages   = {1--38},
  url     = {http://dx.doi.org/10.1016/0004-3702(94)00034-4}
}

@techreport{VenVer03-TR,
  author      = {Vennekens, J. and Verbaeten, S.},
  title       = {Logic Programs with Annotated Disjunctions},
  year        = {2003},
  institution = {K. U. Leuven},
  number      = {CW386}
}

@inproceedings{VenDenBru-JELIA06,
  author    = {Vennekens, J. and Denecker, M. and Bruynooghe, M.},
  title     = {Representing Causal Information about a Probabilistic Process},
  booktitle = {Proceedings of the 10th European Conference on Logics in Artificial Intelligence},
  year      = {2006},
  series    = {LNAI},
  month     = {September},
  publisher = {Springer}
}

@article{DBLP:journals/tplp/VennekensDB09,
  author  = {Vennekens, J. and Denecker, Marc and Bruynooghe, Maurice},
  title   = {{CP}-logic: A Language of Causal Probabilistic Events and Its Relation to Logic Programming},
  journal = {Theory and Practice of Logic Programming},
  volume  = {9},
  number  = {3},
  year    = {2009},
  pages   = {245--308},
  url     = {http://dx.doi.org/10.1017/S1471068409003767}
}

@inproceedings{van2010dtproblog,
  author    = {Van den Broeck, Guy and Thon, Ingo and Van Otterlo, Martijn and De Raedt, Luc},
  title     = {{DTProbLog}: A Decision-Theoretic Probabilistic {Prolog}},
  booktitle = {Twenty-Fourth {AAAI} Conference on Artificial Intelligence},
  year      = {2010}
}

@inproceedings{NguLamRig17-PLP-IW,
  author    = {Arnaud {Nguembang Fadja} and Evelina Lamma and Fabrizio Riguzzi},
  title     = {Deep Probabilistic Logic Programming},
  booktitle = {Probabilistic Logic Programming (PLP 2017)},
  editor    = {Christian {Theil Have} and Riccardo Zese},
  year      = {2017},
  volume    = {1916},
  series    = {CEUR Workshop Proceedings},
  issn      = {1613-0073},
  publisher = {Sun {SITE} Central Europe},
  pages     = {3--14},
  venue     = {Orleans, FR},
  eventdate = {2017-09-07},
  scopus    = {2-s2.0-85030091907},
  url       = {http://ceur-ws.org/Vol-1916/paper1.pdf},
  keywords  = {Probabilistic Logic Programming, Distribution Semantics, Deep Neural Networks, Arithmetic Circuits},
  abstract  = {Probabilistic logic programming under the distribution semantics has been very useful in machine learning. However, inference is expensive, so machine learning algorithms may turn out to be slow. In this paper we consider a restriction of the language called hierarchical PLP in which clauses and predicates are hierarchically organized. In this case the language becomes truth-functional and inference reduces to the evaluation of formulas in the product fuzzy logic. Programs in this language can also be seen as arithmetic circuits or deep neural networks and inference can be re-performed quickly when the parameters change. Learning can then be performed by EM or backpropagation.}
}

@inproceedings{kok2005learning,
  author    = {Kok, Stanley and Domingos, Pedro},
  title     = {Learning the Structure of {Markov} Logic Networks},
  booktitle = {Proceedings of the 22nd International Conference on Machine Learning},
  pages     = {441--448},
  year      = {2005}
}

@article{Rig14-CJ-IJ,
  author    = {Fabrizio Riguzzi},
  title     = {Speeding Up Inference for Probabilistic Logic Programs},
  journal   = {The Computer Journal},
  publisher = {Oxford University Press},
  copyright = {Oxford University Press},
  year      = {2014},
  volume    = {57},
  number    = {3},
  pages     = {347--363},
  doi       = {10.1093/comjnl/bxt096},
  keywords  = {Logic Programming, Probabilistic Logic Programming, Distribution Semantics, Logic Programs with Annotated Disjunctions, PRISM, ProbLog},
  abstract  = {Probabilistic Logic Programming (PLP) allows the representation of domains containing many entities connected by uncertain relations and has many applications, in particular in Machine Learning. PITA is a PLP algorithm for computing the probability of queries that exploits tabling, answer subsumption and Binary Decision Diagrams (BDDs). PITA does not impose any restriction on the programs. Other algorithms, such as PRISM, reduce computation time by imposing restrictions on the program, namely that subgoals are independent and that clause bodies are mutually exclusive. Another assumption that simplifies inference is that clause bodies are independent. In this paper we present the algorithms PITA(IND,IND) and PITA(OPT). PITA(IND,IND) assumes that subgoals and clause bodies are independent. PITA(OPT) instead first checks whether these assumptions hold for subprograms and subgoals: if they do, PITA(OPT) uses a simplified calculation, otherwise it resorts to BDDs. Experiments on a number of benchmark datasets show that PITA(IND,IND) is the fastest on datasets respecting the assumptions while PITA(OPT) is a good option when nothing is known about a dataset.}
}

@inproceedings{NguRigLam18-PLP-IW,
  author    = {{Nguembang Fadja}, Arnaud and Fabrizio Riguzzi and Evelina Lamma},
  title     = {Learning the Parameters of Deep Probabilistic Logic Programs},
  booktitle = {Probabilistic Logic Programming (PLP 2018)},
  editor    = {Elena Bellodi and Tom Schrijvers},
  year      = {2018},
  volume    = {2219},
  series    = {CEUR Workshop Proceedings},
  publisher = {Sun {SITE} Central Europe},
  address   = {Aachen, Germany},
  issn      = {1613-0073},
  venue     = {Ferrara, Italy},
  eventdate = {September 1, 2018},
  copyright = {by the authors},
  pages     = {9--14},
  url       = {http://ceur-ws.org/Vol-2219/paper2.pdf}
}

@inproceedings{fadja2018expectation,
  author       = {{Nguembang Fadja}, Arnaud and Riguzzi, Fabrizio and Lamma, Evelina},
  title        = {Expectation Maximization in Deep Probabilistic Logic Programming},
  booktitle    = {International Conference of the Italian Association for Artificial Intelligence},
  pages        = {293--306},
  year         = {2018},
  organization = {Springer}
}

@article{fadja2021learning,
  author    = {{Nguembang Fadja}, Arnaud and Riguzzi, Fabrizio and Lamma, Evelina},
  title     = {Learning Hierarchical Probabilistic Logic Programs},
  journal   = {Machine Learning},
  pages     = {1--57},
  year      = {2021},
  publisher = {Springer}
}