Structured Gotos are (Slightly) Harmful
Sennesh, E., & Gil, Y. (2016). Structured gotos are (slightly) harmful. In Proceedings of the 31st Annual ACM Symposium on Applied Computing (pp. 1784–1789).
Blanchard, N., Gabasova, L., Selker, T., & Sennesh, E. (2018). Créer de tête de nombreux mots de passe inviolables et inoubliables [Mentally creating many unbreakable and memorable passwords]. In ALGOTEL 2018 - 20èmes Rencontres Francophones sur les Aspects Algorithmiques des Télécommunications, Roscoff, France.
Sennesh, E., Wu, H., & van de Meent, J.-W. (2018). Combinators for modeling and inference.
Sennesh, E., Scibior, A., Wu, H., & van de Meent, J.-W. (2018). Composing modeling and inference operations with probabilistic program combinators.
Wu, H., Zimmermann, H., Sennesh, E., Le, T. A., & van de Meent, J.-W. (2020). Amortized population Gibbs samplers with neural sufficient statistics. In Proceedings of the 37th International Conference on Machine Learning (PMLR, Vol. 119, pp. 10421–10431).
Sennesh, E. (2020). Learning a deep generative model like a program: The free category prior. arXiv preprint arXiv:2011.11063.
Sennesh, E., Khan, Z., Wang, Y., Hutchinson, J. B., Satpute, A., Dy, J., & van de Meent, J.-W. (2020). Neural topographic factor analysis for fMRI data. In Advances in Neural Information Processing Systems (Vol. 33, pp. 12046–12056).
Stites, S., Zimmermann, H., Wu, H., Sennesh, E., & van de Meent, J.-W. (2021). Learning proposals for probabilistic programs with inference combinators. In Proceedings of the Thirty-Seventh Conference on Uncertainty in Artificial Intelligence (PMLR, Vol. 161, pp. 1056–1066).
Sennesh, E., Theriault, J., Brooks, D., van de Meent, J.-W., Barrett, L. F., & Quigley, K. S. (2022). Interoception as modeling, allostasis as control. Biological Psychology, 167, 108242.
Khan, Z., Wang, Y., Sennesh, E., Dy, J., Ostadabbas, S., van de Meent, J.-W., Hutchinson, J. B., & Satpute, A. B. (2022). A computational neural model for mapping degenerate neural architectures. Neuroinformatics, 1–15.
Sennesh, E., Xu, T., & Maruyama, Y. (2022). A probabilistic generative model of free categories. arXiv preprint arXiv:2205.04545.
Paper presentation at Psychologists, Engineers, and Neuroscientists seminar, Northeastern University, Boston, MA, USA
Research talk at Psychologists, Engineers, and Neuroscientists seminar, Northeastern University, Boston, MA, USA