@inproceedings{guhe_effectiveness_2014, address = {Dortmund, Germany}, title = {The effectiveness of persuasion in {The} {Settlers} of {Catan}}, isbn = {978-1-4799-3547-5}, url = {http://ieeexplore.ieee.org/document/6932861/}, doi = {10.1109/CIG.2014.6932861}, urldate = {2020-07-20}, booktitle = {2014 {IEEE} {Conference} on {Computational} {Intelligence} and {Games}}, publisher = {IEEE}, author = {Guhe, Markus and Lascarides, Alex}, month = aug, year = {2014}, pages = {1--8}, file = {Submitted Version:/home/nemo/Zotero/storage/ITK52TEL/Guhe and Lascarides - 2014 - The effectiveness of persuasion in The Settlers of.pdf:application/pdf;Submitted Version:/home/nemo/Zotero/storage/HTNFAWSA/Guhe and Lascarides - 2014 - The effectiveness of persuasion in The Settlers of.pdf:application/pdf} } @inproceedings{guhe_effectiveness_2014-1, title = {The effectiveness of persuasion in {The} {Settlers} of {Catan}}, url = {https://doi.org/10.1109%2Fcig.2014.6932861}, doi = {10.1109/cig.2014.6932861}, booktitle = {2014 {IEEE} {Conference} on {Computational} {Intelligence} and {Games}}, publisher = {IEEE}, author = {Guhe, Markus and Lascarides, Alex}, month = aug, year = {2014}, file = {Submitted Version:/home/nemo/Zotero/storage/WJWTC8A9/Guhe and Lascarides - 2014 - The effectiveness of persuasion in The Settlers of.pdf:application/pdf} } @article{boda_avoiding_2018, title = {Avoiding {Revenge} {Using} {Optimal} {Opponent} {Ranking} {Strategy} in the {Board} {Game} {Catan}}, volume = {10}, url = {https://doi.org/10.4018%2Fijgcms.2018040103}, doi = {10.4018/ijgcms.2018040103}, number = {2}, journal = {International Journal of Gaming and Computer-Mediated Simulations}, author = {Boda, Márton Attila}, month = apr, year = {2018}, note = {Publisher: IGI Global}, pages = {47--70}, file = {Full Text:/home/nemo/Zotero/storage/C8XZ64B8/Boda - 2018 - Avoiding Revenge Using Optimal Opponent Ranking St.pdf:application/pdf} } @inproceedings{guhe_game_2014, title = {Game strategies for {The} 
{Settlers} of {Catan}}, url = {https://doi.org/10.1109%2Fcig.2014.6932884}, doi = {10.1109/cig.2014.6932884}, booktitle = {2014 {IEEE} {Conference} on {Computational} {Intelligence} and {Games}}, publisher = {IEEE}, author = {Guhe, Markus and Lascarides, Alex}, month = aug, year = {2014}, file = {Submitted Version:/home/nemo/Zotero/storage/E7ARZVSI/Guhe and Lascarides - 2014 - Game strategies for The Settlers of Catan.pdf:application/pdf} } @incollection{szita_monte-carlo_2010, title = {Monte-{Carlo} {Tree} {Search} in {Settlers} of {Catan}}, url = {https://doi.org/10.1007%2F978-3-642-12993-3_3}, booktitle = {Lecture {Notes} in {Computer} {Science}}, publisher = {Springer Berlin Heidelberg}, author = {Szita, István and Chaslot, Guillaume and Spronck, Pieter}, year = {2010}, doi = {10.1007/978-3-642-12993-3_3}, pages = {21--32}, file = {Full Text:/home/nemo/Zotero/storage/T5DQM5GD/Szita et al. - 2010 - Monte-Carlo Tree Search in Settlers of Catan.pdf:application/pdf} } @incollection{xenou_deep_2019, title = {Deep {Reinforcement} {Learning} in {Strategic} {Board} {Game} {Environments}}, url = {https://doi.org/10.1007%2F978-3-030-14174-5_16}, booktitle = {Multi-{Agent} {Systems}}, publisher = {Springer International Publishing}, author = {Xenou, Konstantia and Chalkiadakis, Georgios and Afantenos, Stergos}, year = {2019}, doi = {10.1007/978-3-030-14174-5_16}, pages = {233--248}, file = {Accepted Version:/home/nemo/Zotero/storage/MXL35B77/Xenou et al. - 2019 - Deep Reinforcement Learning in Strategic Board Gam.pdf:application/pdf} } @article{maliphant_mini-risk_1990, title = {Mini-{Risk}: {Strategies} for a {Simplified} {Board} {Game}}, volume = {41}, url = {https://doi.org/10.1057%2Fjors.1990.2}, doi = {10.1057/jors.1990.2}, number = {1}, journal = {Journal of the Operational Research Society}, author = {Maliphant, Sarah A. 
and Smith, David K.}, month = jan, year = {1990}, note = {Publisher: Informa UK Limited}, pages = {9--16}, file = {Full Text:/home/nemo/Zotero/storage/X6ZHBQT8/Maliphant and Smith - 1990 - Mini-Risk Strategies for a Simplified Board Game.pdf:application/pdf} } @inproceedings{neves_learning_2002, title = {Learning the risk board game with classifier systems}, url = {https://doi.org/10.1145%2F508791.508904}, doi = {10.1145/508791.508904}, booktitle = {Proceedings of the 2002 {ACM} symposium on {Applied} computing - {SAC} '02}, publisher = {ACM Press}, author = {Neves, Atila and Brasão, Osvaldo and Rosa, Agostinho}, year = {2002}, file = {Full Text:/home/nemo/Zotero/storage/V8H6XI4Y/Neves et al. - 2002 - Learning the risk board game with classifier syste.pdf:application/pdf} } @article{tan_markov_1997, title = {Markov {Chains} and the {RISK} {Board} {Game}}, volume = {70}, url = {https://doi.org/10.1080%2F0025570x.1997.11996573}, doi = {10.1080/0025570x.1997.11996573}, number = {5}, journal = {Mathematics Magazine}, author = {Tan, Barış}, month = dec, year = {1997}, note = {Publisher: Informa UK Limited}, pages = {349--357}, file = {Full Text:/home/nemo/Zotero/storage/ZS9TP9BZ/Tan - 1997 - Markov Chains and the RISK Board Game.pdf:application/pdf} } @article{osborne_markov_2003, title = {Markov {Chains} for the {RISK} {Board} {Game} {Revisited}}, volume = {76}, url = {https://doi.org/10.1080%2F0025570x.2003.11953165}, doi = {10.1080/0025570x.2003.11953165}, number = {2}, journal = {Mathematics Magazine}, author = {Osborne, Jason A.}, month = apr, year = {2003}, note = {Publisher: Informa UK Limited}, pages = {129--135}, file = {Full Text:/home/nemo/Zotero/storage/FI6LTX8L/Osborne - 2003 - Markov Chains for the RISK Board Game Revisited.pdf:application/pdf} } @article{vaccaro_planning_2005, title = {Planning an {Endgame} {Move} {Set} for the {Game} {RISK}: {A} {Comparison} of {Search} {Algorithms}}, volume = {9}, url = 
{https://doi.org/10.1109%2Ftevc.2005.856211}, doi = {10.1109/tevc.2005.856211}, number = {6}, journal = {IEEE Trans. Evol. Computat.}, author = {Vaccaro, J. M. and Guest, C. C.}, month = dec, year = {2005}, note = {Publisher: Institute of Electrical and Electronics Engineers (IEEE)}, pages = {641--652}, file = {Full Text:/home/nemo/Zotero/storage/8Q7W86Z4/Vaccaro and Guest - 2005 - Planning an Endgame Move Set for the Game RISK A .pdf:application/pdf} } @inproceedings{gedda_monte_2018, title = {Monte {Carlo} {Methods} for the {Game} {Kingdomino}}, url = {https://doi.org/10.1109%2Fcig.2018.8490419}, doi = {10.1109/cig.2018.8490419}, booktitle = {2018 {IEEE} {Conference} on {Computational} {Intelligence} and {Games} ({CIG})}, publisher = {IEEE}, author = {Gedda, Magnus and Lagerkvist, Mikael Z. and Butler, Martin}, month = aug, year = {2018}, file = {Submitted Version:/home/nemo/Zotero/storage/BKV7VG59/Gedda et al. - 2018 - Monte Carlo Methods for the Game Kingdomino.pdf:application/pdf} } @article{cox_how_2015, title = {How to {Make} the {Perfect} {Fireworks} {Display}: {Two} {Strategies} for {Hanabi}}, volume = {88}, url = {https://doi.org/10.4169%2Fmath.mag.88.5.323}, doi = {10.4169/math.mag.88.5.323}, number = {5}, journal = {Mathematics Magazine}, author = {Cox, Christopher and De Silva, Jessica and Deorsey, Philip and Kenter, Franklin H. J. and Retter, Troy and Tobin, Josh}, month = dec, year = {2015}, note = {Publisher: Informa UK Limited}, pages = {323--336}, file = {Full Text:/home/nemo/Zotero/storage/E7PR5FAI/Cox et al. - 2015 - How to Make the Perfect Fireworks Display Two Str.pdf:application/pdf} } @inproceedings{walton-rivers_evaluating_2017, title = {Evaluating and modelling {Hanabi}-playing agents}, url = {https://doi.org/10.1109%2Fcec.2017.7969465}, doi = {10.1109/cec.2017.7969465}, booktitle = {2017 {IEEE} {Congress} on {Evolutionary} {Computation} ({CEC})}, publisher = {IEEE}, author = {Walton-Rivers, Joseph and Williams, Piers R. 
and Bartle, Richard and Perez-Liebana, Diego and Lucas, Simon M.}, month = jun, year = {2017}, file = {Accepted Version:/home/nemo/Zotero/storage/6LVCR5LJ/Walton-Rivers et al. - 2017 - Evaluating and modelling Hanabi-playing agents.pdf:application/pdf} } @article{bard_hanabi_2020, title = {The {Hanabi} challenge: {A} new frontier for {AI} research}, volume = {280}, url = {https://doi.org/10.1016%2Fj.artint.2019.103216}, doi = {10.1016/j.artint.2019.103216}, journal = {Artificial Intelligence}, author = {Bard, Nolan and Foerster, Jakob N. and Chandar, Sarath and Burch, Neil and Lanctot, Marc and Song, H. Francis and Parisotto, Emilio and Dumoulin, Vincent and Moitra, Subhodeep and Hughes, Edward and Dunning, Iain and Mourad, Shibl and Larochelle, Hugo and Bellemare, Marc G. and Bowling, Michael}, month = mar, year = {2020}, note = {Publisher: Elsevier BV}, pages = {103216}, file = {Full Text:/home/nemo/Zotero/storage/QK4PLTNC/Bard et al. - 2020 - The Hanabi challenge A new frontier for AI resear.pdf:application/pdf} } @inproceedings{walton-rivers_2018_2019, title = {The 2018 {Hanabi} competition}, url = {https://doi.org/10.1109%2Fcig.2019.8848008}, doi = {10.1109/cig.2019.8848008}, booktitle = {2019 {IEEE} {Conference} on {Games} ({CoG})}, publisher = {IEEE}, author = {Walton-Rivers, Joseph and Williams, Piers R. and Bartle, Richard}, month = aug, year = {2019}, file = {Accepted Version:/home/nemo/Zotero/storage/EG5MFSFH/Walton-Rivers et al. - 2019 - The 2018 Hanabi competition.pdf:application/pdf} } @inproceedings{canaan_diverse_2019, title = {Diverse {Agents} for {Ad}-{Hoc} {Cooperation} in {Hanabi}}, url = {https://doi.org/10.1109%2Fcig.2019.8847944}, doi = {10.1109/cig.2019.8847944}, booktitle = {2019 {IEEE} {Conference} on {Games} ({CoG})}, publisher = {IEEE}, author = {Canaan, Rodrigo and Togelius, Julian and Nealen, Andy and Menzel, Stefan}, month = aug, year = {2019}, file = {Submitted Version:/home/nemo/Zotero/storage/9WT5YA3E/Canaan et al. 
- 2019 - Diverse Agents for Ad-Hoc Cooperation in Hanabi.pdf:application/pdf} } @article{ash_monopoly_1972, title = {Monopoly as a {Markov} {Process}}, volume = {45}, url = {https://doi.org/10.1080%2F0025570x.1972.11976187}, doi = {10.1080/0025570x.1972.11976187}, number = {1}, journal = {Mathematics Magazine}, author = {Ash, Robert B. and Bishop, Richard L.}, month = jan, year = {1972}, note = {Publisher: Informa UK Limited}, pages = {26--29}, file = {Submitted Version:/home/nemo/Zotero/storage/KZZXN75I/Ash and Bishop - 1972 - Monopoly as a Markov Process.pdf:application/pdf} } @article{cowling_ensemble_2012, title = {Ensemble {Determinization} in {Monte} {Carlo} {Tree} {Search} for the {Imperfect} {Information} {Card} {Game} {Magic}: {The} {Gathering}}, volume = {4}, url = {https://doi.org/10.1109%2Ftciaig.2012.2204883}, doi = {10.1109/tciaig.2012.2204883}, number = {4}, journal = {IEEE Trans. Comput. Intell. AI Games}, author = {Cowling, Peter I. and Ward, Colin D. and Powley, Edward J.}, month = dec, year = {2012}, note = {Publisher: Institute of Electrical and Electronics Engineers (IEEE)}, pages = {241--257}, file = {Accepted Version:/home/nemo/Zotero/storage/JI5MQ857/Cowling et al. 
- 2012 - Ensemble Determinization in Monte Carlo Tree Searc.pdf:application/pdf} } @article{bosch_optimal_2000, title = {Optimal {Card}-{Collecting} {Strategies} for {Magic}: {The} {Gathering}}, volume = {31}, url = {https://doi.org/10.1080%2F07468342.2000.11974103}, doi = {10.1080/07468342.2000.11974103}, number = {1}, journal = {The College Mathematics Journal}, author = {Bosch, Robert A.}, month = jan, year = {2000}, note = {Publisher: Informa UK Limited}, pages = {15--21}, file = {Full Text:/home/nemo/Zotero/storage/A6L5BUGS/Bosch - 2000 - Optimal Card-Collecting Strategies for Magic The .pdf:application/pdf} } @inproceedings{ward_monte_2009, title = {Monte {Carlo} search applied to card selection in {Magic}: {The} {Gathering}}, url = {https://doi.org/10.1109%2Fcig.2009.5286501}, doi = {10.1109/cig.2009.5286501}, booktitle = {2009 {IEEE} {Symposium} on {Computational} {Intelligence} and {Games}}, publisher = {IEEE}, author = {Ward, C. D. and Cowling, P. I.}, month = sep, year = {2009}, file = {Full Text:/home/nemo/Zotero/storage/GR28QUPQ/Ward and Cowling - 2009 - Monte Carlo search applied to card selection in Ma.pdf:application/pdf} } @incollection{demaine_is_2010, title = {{UNO} {Is} {Hard}, {Even} for a {Single} {Player}}, url = {https://doi.org/10.1007%2F978-3-642-13122-6_15}, booktitle = {Lecture {Notes} in {Computer} {Science}}, publisher = {Springer Berlin Heidelberg}, author = {Demaine, Erik D. and Demaine, Martin L. and Uehara, Ryuhei and Uno, Takeaki and Uno, Yushi}, year = {2010}, doi = {10.1007/978-3-642-13122-6_15}, pages = {133--144}, file = {Submitted Version:/home/nemo/Zotero/storage/75SB8JSY/Demaine et al. 
- 2010 - UNO Is Hard, Even for a Single Player.pdf:application/pdf} } @article{mishiba_quixo_2020, title = {{QUIXO} is {EXPTIME}-complete}, url = {https://doi.org/10.1016%2Fj.ipl.2020.105995}, doi = {10.1016/j.ipl.2020.105995}, journal = {Information Processing Letters}, author = {Mishiba, Shohei and Takenaga, Yasuhiko}, month = jul, year = {2020}, note = {Publisher: Elsevier BV}, pages = {105995}, file = {Full Text:/home/nemo/Zotero/storage/I6S8CB93/Mishiba and Takenaga - 2020 - QUIXO is EXPTIME-complete.pdf:application/pdf} } @incollection{woolford_scout_2017, title = {{SCOUT}: {A} {Case}-{Based} {Reasoning} {Agent} for {Playing} {Race} for the {Galaxy}}, url = {https://doi.org/10.1007%2F978-3-319-61030-6_27}, booktitle = {Case-{Based} {Reasoning} {Research} and {Development}}, publisher = {Springer International Publishing}, author = {Woolford, Michael and Watson, Ian}, year = {2017}, doi = {10.1007/978-3-319-61030-6_27}, pages = {390--402}, file = {Woolford and Watson - 2017 - SCOUT A Case-Based Reasoning Agent for Playing Ra.pdf:/home/nemo/Zotero/storage/LMIXD5XY/Woolford and Watson - 2017 - SCOUT A Case-Based Reasoning Agent for Playing Ra.pdf:application/pdf} } @article{coleman_game_2012, title = {Game, {Set}, {Math}}, volume = {85}, url = {https://doi.org/10.4169%2Fmath.mag.85.2.083}, doi = {10.4169/math.mag.85.2.083}, number = {2}, journal = {Mathematics Magazine}, author = {Coleman, Ben and Hartshorn, Kevin}, month = apr, year = {2012}, note = {Publisher: Informa UK Limited}, pages = {83--96}, file = {Full Text:/home/nemo/Zotero/storage/UZL88CQ4/Coleman and Hartshorn - 2012 - Game, Set, Math.pdf:application/pdf} } @article{glass_joy_2018, title = {The {Joy} of {SET}}, volume = {125}, url = {https://doi.org/10.1080%2F00029890.2018.1412661}, doi = {10.1080/00029890.2018.1412661}, number = {3}, journal = {The American Mathematical Monthly}, author = {Glass, Darren}, month = feb, year = {2018}, note = {Publisher: Informa UK Limited}, pages = {284--288}, file 
= {Full Text:/home/nemo/Zotero/storage/ID46MICU/Glass - 2018 - The Joy of SET.pdf:application/pdf} } @incollection{lazarusli_implementation_2015, title = {Implementation of {Artificial} {Intelligence} with 3 {Different} {Characters} of {AI} {Player} on “{Monopoly} {Deal}” {Computer} {Game}}, url = {https://doi.org/10.1007%2F978-3-662-46742-8_11}, booktitle = {Communications in {Computer} and {Information} {Science}}, publisher = {Springer Berlin Heidelberg}, author = {Lazarusli, Irene A. and Lukas, Samuel and Widjaja, Patrick}, year = {2015}, doi = {10.1007/978-3-662-46742-8_11}, pages = {119--127} } @incollection{pawlewicz_nearly_2011, title = {Nearly {Optimal} {Computer} {Play} in {Multi}-player {Yahtzee}}, url = {https://doi.org/10.1007%2F978-3-642-17928-0_23}, booktitle = {Computers and {Games}}, publisher = {Springer Berlin Heidelberg}, author = {Pawlewicz, Jakub}, year = {2011}, doi = {10.1007/978-3-642-17928-0_23}, pages = {250--262} } @inproceedings{glenn_computer_2007, title = {Computer {Strategies} for {Solitaire} {Yahtzee}}, url = {https://doi.org/10.1109%2Fcig.2007.368089}, doi = {10.1109/cig.2007.368089}, booktitle = {2007 {IEEE} {Symposium} on {Computational} {Intelligence} and {Games}}, publisher = {IEEE}, author = {Glenn, James R.}, year = {2007}, file = {Submitted Version:/home/nemo/Zotero/storage/GPCGB5MW/Glenn - 2007 - Computer Strategies for Solitaire Yahtzee.pdf:application/pdf} } @article{maynard_modeling_2001, title = {Modeling expert problem solving in a game of chance: a {Yahtzee} case study}, volume = {18}, url = {https://doi.org/10.1111%2F1468-0394.00160}, doi = {10.1111/1468-0394.00160}, number = {2}, journal = {Expert Systems}, author = {Maynard, Ken and Moss, Patrick and Whitehead, Marcus and Narayanan, S. and Garay, Matt and Brannon, Nathan and Kantamneni, Raj Gopal and Kustra, Todd}, month = may, year = {2001}, note = {Publisher: Wiley}, pages = {88--98}, file = {Full Text:/home/nemo/Zotero/storage/PG6NUX5X/Maynard et al. 
- 2001 - Modeling expert problem solving in a game of chanc.pdf:application/pdf} } @incollection{oka_systematic_2016, title = {Systematic {Selection} of {N}-{Tuple} {Networks} for 2048}, url = {https://doi.org/10.1007%2F978-3-319-50935-8_8}, booktitle = {Computers and {Games}}, publisher = {Springer International Publishing}, author = {Oka, Kazuto and Matsuzaki, Kiminori}, year = {2016}, doi = {10.1007/978-3-319-50935-8_8}, pages = {81--92}, file = {Full Text:/home/nemo/Zotero/storage/DAJB4HAP/Oka and Matsuzaki - 2016 - Systematic Selection of N-Tuple Networks for 2048.pdf:application/pdf} } @inproceedings{matsuzaki_systematic_2016, title = {Systematic selection of {N}-tuple networks with consideration of interinfluence for game 2048}, url = {https://doi.org/10.1109%2Ftaai.2016.7880154}, doi = {10.1109/taai.2016.7880154}, booktitle = {2016 {Conference} on {Technologies} and {Applications} of {Artificial} {Intelligence} ({TAAI})}, publisher = {IEEE}, author = {Matsuzaki, Kiminori}, month = nov, year = {2016}, file = {Full Text:/home/nemo/Zotero/storage/LYN4IZ38/Matsuzaki - 2016 - Systematic selection of N-tuple networks with cons.pdf:application/pdf} } @inproceedings{rodgers_investigation_2014, title = {An investigation into 2048 {AI} strategies}, url = {https://doi.org/10.1109%2Fcig.2014.6932920}, doi = {10.1109/cig.2014.6932920}, booktitle = {2014 {IEEE} {Conference} on {Computational} {Intelligence} and {Games}}, publisher = {IEEE}, author = {Rodgers, Philip and Levine, John}, month = aug, year = {2014}, file = {Full Text:/home/nemo/Zotero/storage/GWVBHIAP/Rodgers and Levine - 2014 - An investigation into 2048 AI strategies.pdf:application/pdf} } @article{anthony_learning_2020, title = {Learning to {Play} {No}-{Press} {Diplomacy} with {Best} {Response} {Policy} {Iteration}}, url = {http://arxiv.org/abs/2006.04635v2}, journal = {arxiv:2006.04635}, author = {Anthony, Thomas and Eccles, Tom and Tacchetti, Andrea and Kramár, János and Gemp, Ian and Hudson, Thomas C. 
and Porcel, Nicolas and Lanctot, Marc and Pérolat, Julien and Everett, Richard and Singh, Satinder and Graepel, Thore and Bachrach, Yoram}, year = {2020}, file = {Full Text:/home/nemo/Zotero/storage/RKH36CBQ/Anthony et al. - 2020 - Learning to Play No-Press Diplomacy with Best Resp.pdf:application/pdf} } @article{paquette_no_2019, title = {No {Press} {Diplomacy}: {Modeling} {Multi}-{Agent} {Gameplay}}, url = {http://arxiv.org/abs/1909.02128v2}, journal = {arxiv:1909.02128}, author = {Paquette, Philip and Lu, Yuchen and Bocco, Steven and Smith, Max O. and Ortiz-Gagne, Satya and Kummerfeld, Jonathan K. and Singh, Satinder and Pineau, Joelle and Courville, Aaron}, year = {2019}, file = {Full Text:/home/nemo/Zotero/storage/YHUCJAG8/Paquette et al. - 2019 - No Press Diplomacy Modeling Multi-Agent Gameplay.pdf:application/pdf} } @article{tan_agent_2019, title = {Agent {Madoff}: {A} {Heuristic}-{Based} {Negotiation} {Agent} {For} {The} {Diplomacy} {Strategy} {Game}}, url = {http://arxiv.org/abs/1902.06996v1}, journal = {arxiv:1902.06996}, author = {Tan, Hao Hao}, year = {2019}, file = {Full Text:/home/nemo/Zotero/storage/6Z6CSYSZ/Tan - 2019 - Agent Madoff A Heuristic-Based Negotiation Agent .pdf:application/pdf} } @article{gedda_monte_2018-1, title = {Monte {Carlo} {Methods} for the {Game} {Kingdomino}}, url = {http://arxiv.org/abs/1807.04458v2}, journal = {arxiv:1807.04458}, author = {Gedda, Magnus and Lagerkvist, Mikael Z. and Butler, Martin}, year = {2018}, file = {Full Text:/home/nemo/Zotero/storage/T2BSPBPV/Gedda et al. - 2018 - Monte Carlo Methods for the Game Kingdomino.pdf:application/pdf} } @article{nguyen_np-completeness_2019, title = {{NP}-completeness of the game {Kingdomino}}, url = {http://arxiv.org/abs/1909.02849v3}, journal = {arxiv:1909.02849}, author = {Nguyen, Viet-Ha and Perrot, Kevin and Vallet, Mathieu}, year = {2019}, file = {Full Text:/home/nemo/Zotero/storage/32L6ZKCA/Nguyen et al. 
- 2019 - NP-completeness of the game Kingdomino.pdf:application/pdf} } @article{lerer_improving_2019, title = {Improving {Policies} via {Search} in {Cooperative} {Partially} {Observable} {Games}}, url = {http://arxiv.org/abs/1912.02318v1}, journal = {arxiv:1912.02318}, author = {Lerer, Adam and Hu, Hengyuan and Foerster, Jakob and Brown, Noam}, year = {2019}, file = {Full Text:/home/nemo/Zotero/storage/F2N99DK9/Lerer et al. - 2019 - Improving Policies via Search in Cooperative Parti.pdf:application/pdf} } @article{baffier_hanabi_2016, title = {Hanabi is {NP}-hard, {Even} for {Cheaters} who {Look} at {Their} {Cards}}, url = {http://arxiv.org/abs/1603.01911v3}, journal = {arxiv:1603.01911}, author = {Baffier, Jean-Francois and Chiu, Man-Kwun and Diez, Yago and Korman, Matias and Mitsou, Valia and van Renssen, André and Roeloffzen, Marcel and Uno, Yushi}, year = {2016}, file = {Full Text:/home/nemo/Zotero/storage/XMPLK7RJ/Baffier et al. - 2016 - Hanabi is NP-hard, Even for Cheaters who Look at T.pdf:application/pdf} } @article{canaan_generating_2020, title = {Generating and {Adapting} to {Diverse} {Ad}-{Hoc} {Cooperation} {Agents} in {Hanabi}}, url = {http://arxiv.org/abs/2004.13710v2}, journal = {arxiv:2004.13710}, author = {Canaan, Rodrigo and Gao, Xianbo and Togelius, Julian and Nealen, Andy and Menzel, Stefan}, year = {2020}, file = {Full Text:/home/nemo/Zotero/storage/PDZQXHYY/Canaan et al. - 2020 - Generating and Adapting to Diverse Ad-Hoc Cooperat.pdf:application/pdf} } @article{canaan_evaluating_2020, title = {Evaluating the {Rainbow} {DQN} {Agent} in {Hanabi} with {Unseen} {Partners}}, url = {http://arxiv.org/abs/2004.13291v1}, journal = {arxiv:2004.13291}, author = {Canaan, Rodrigo and Gao, Xianbo and Chung, Youjin and Togelius, Julian and Nealen, Andy and Menzel, Stefan}, year = {2020}, file = {Full Text:/home/nemo/Zotero/storage/DEVP82UJ/Canaan et al. 
- 2020 - Evaluating the Rainbow DQN Agent in Hanabi with Un.pdf:application/pdf} } @article{biderman_magic_2020, title = {Magic: the {Gathering} is as {Hard} as {Arithmetic}}, url = {http://arxiv.org/abs/2003.05119v1}, journal = {arxiv:2003.05119}, author = {Biderman, Stella}, year = {2020}, file = {Full Text:/home/nemo/Zotero/storage/N83MTIN9/Biderman - 2020 - Magic the Gathering is as Hard as Arithmetic.pdf:application/pdf} } @article{churchill_magic_2019, title = {Magic: {The} {Gathering} is {Turing} {Complete}}, url = {http://arxiv.org/abs/1904.09828v2}, journal = {arxiv:1904.09828}, author = {Churchill, Alex and Biderman, Stella and Herrick, Austin}, year = {2019}, file = {Full Text:/home/nemo/Zotero/storage/5NW5WTWK/Churchill et al. - 2019 - Magic The Gathering is Turing Complete.pdf:application/pdf} } @article{zilio_neural_2018, title = {Neural {Networks} {Models} for {Analyzing} {Magic}: the {Gathering} {Cards}}, url = {http://arxiv.org/abs/1810.03744v1}, journal = {arxiv:1810.03744}, author = {Zilio, Felipe and Prates, Marcelo}, year = {2018}, file = {Full Text:/home/nemo/Zotero/storage/VX32HLNF/Zilio et al. - 2018 - Neural Networks Models for Analyzing Magic the Ga.pdf:application/pdf} } @inproceedings{grichshenko_using_2020, title = {Using {Tabu} {Search} {Algorithm} for {Map} {Generation} in the {Terra} {Mystica} {Tabletop} {Game}}, url = {https://doi.org/10.1145%2F3396474.3396492}, doi = {10.1145/3396474.3396492}, booktitle = {Proceedings of the 2020 4th {International} {Conference} on {Intelligent} {Systems}, {Metaheuristics} \& {Swarm} {Intelligence}}, publisher = {ACM}, author = {Grichshenko, Alexandr and Araújo, Luiz Jonatã Pires de and Gimaeva, Susanna and Brown, Joseph Alexander}, month = mar, year = {2020}, file = {Submitted Version:/home/nemo/Zotero/storage/4LSZ3R5D/Grichshenko et al. 
- 2020 - Using Tabu Search Algorithm for Map Generation in .pdf:application/pdf} } @article{migdal_mathematical_2010, title = {A mathematical model of the {Mafia} game}, url = {http://arxiv.org/abs/1009.1031v3}, journal = {arxiv:1009.1031}, author = {Migdał, Piotr}, year = {2010}, file = {Full Text:/home/nemo/Zotero/storage/RCJ7EPW7/Migdał - 2010 - A mathematical model of the Mafia game.pdf:application/pdf} } @article{demaine_complexity_2010, title = {The complexity of {UNO}}, url = {http://arxiv.org/abs/1003.2851v3}, journal = {arxiv:1003.2851}, author = {Demaine, Erik D. and Demaine, Martin L. and Harvey, Nicholas J. A. and Uehara, Ryuhei and Uno, Takeaki and Uno, Yushi}, year = {2010}, file = {Full Text:/home/nemo/Zotero/storage/KNHHMQC3/Demaine et al. - 2010 - The complexity of UNO.pdf:application/pdf} } @article{almanza_trainyard_2016, title = {Trainyard is {NP}-{Hard}}, url = {http://arxiv.org/abs/1603.00928v1}, journal = {arxiv:1603.00928}, author = {Almanza, Matteo and Leucci, Stefano and Panconesi, Alessandro}, year = {2016}, file = {Full Text:/home/nemo/Zotero/storage/6XZDBHIF/Almanza et al. 
- 2016 - Trainyard is NP-Hard.pdf:application/pdf} } @article{langerman_threes_2015, title = {Threes!, {Fives}, 1024!, and 2048 are {Hard}}, url = {http://arxiv.org/abs/1505.04274v1}, journal = {arxiv:1505.04274}, author = {Langerman, Stefan and Uno, Yushi}, year = {2015}, file = {Full Text:/home/nemo/Zotero/storage/EKHK8LWW/Langerman and Uno - 2015 - Threes!, Fives, 1024!, and 2048 are Hard.pdf:application/pdf} } @article{eppstein_making_2018, title = {Making {Change} in 2048}, url = {http://arxiv.org/abs/1804.07396v1}, journal = {arxiv:1804.07396}, author = {Eppstein, David}, year = {2018}, file = {Full Text:/home/nemo/Zotero/storage/MTEUWS7P/Eppstein - 2018 - Making Change in 2048.pdf:application/pdf} } @article{das_analysis_2018, title = {Analysis of the {Game} "2048" and its {Generalization} in {Higher} {Dimensions}}, url = {http://arxiv.org/abs/1804.07393v2}, journal = {arxiv:1804.07393}, author = {Das, Madhuparna and Paul, Goutam}, year = {2018}, file = {Full Text:/home/nemo/Zotero/storage/IVPCDJKF/Das and Paul - 2018 - Analysis of the Game 2048 and its Generalization.pdf:application/pdf} } @article{yeh_multi-stage_2016, title = {Multi-{Stage} {Temporal} {Difference} {Learning} for 2048-like {Games}}, url = {http://arxiv.org/abs/1606.07374v2}, journal = {arxiv:1606.07374}, author = {Yeh, Kun-Hao and Wu, I.-Chen and Hsueh, Chu-Hsuan and Chang, Chia-Chuan and Liang, Chao-Chin and Chiang, Han}, year = {2016}, file = {Full Text:/home/nemo/Zotero/storage/XYA7M7R4/Yeh et al. 
- 2016 - Multi-Stage Temporal Difference Learning for 2048-.pdf:application/pdf} } @article{mehta_2048_2014, title = {2048 is ({PSPACE}) {Hard}, but {Sometimes} {Easy}}, url = {http://arxiv.org/abs/1408.6315v1}, journal = {arxiv:1408.6315}, author = {Mehta, Rahul}, year = {2014}, file = {Full Text:/home/nemo/Zotero/storage/TDMX7RFI/Mehta - 2014 - 2048 is (PSPACE) Hard, but Sometimes Easy.pdf:application/pdf} } @misc{noauthor_settlers_nodate, title = {Settlers of {Catan} bot trained using reinforcement learning}, url = {https://jonzia.github.io/Catan/} } @inproceedings{guhe_trading_2012, title = {Trading in a multiplayer board game: {Towards} an analysis of non-cooperative dialogue}, volume = {34}, booktitle = {Proceedings of the {Annual} {Meeting} of the {Cognitive} {Science} {Society}}, author = {Guhe, Markus and Lascarides, Alex}, year = {2012}, note = {Issue: 34}, file = {Guhe and Lascarides - 2012 - Trading in a multiplayer board game Towards an an.pdf:/home/nemo/Zotero/storage/AT8UHTXM/Guhe and Lascarides - 2012 - Trading in a multiplayer board game Towards an an.pdf:application/pdf} } @article{noauthor_pomcp_nodate, title = {{POMCP} with {Human} {Preferences} in {Settlers} of {Catan}}, url = {https://www.aaai.org/ocs/index.php/AIIDE/AIIDE18/paper/viewFile/18091/17217}, file = {POMCP with Human Preferencesin Settlers of Catan.pdf:/home/nemo/Zotero/storage/CA62SLVK/POMCP with Human Preferencesin Settlers of Catan.pdf:application/pdf} } @misc{noauthor_impact_nodate, title = {The impact of loaded dice in {Catan}}, url = {https://izbicki.me/blog/how-to-cheat-at-settlers-of-catan-by-loading-the-dice-and-prove-it-with-p-values.html} } @article{noauthor_monte_nodate, title = {Monte {Carlo} {Tree} {Search} in a {Modern} {Board} {Game} {Framework}}, url = {https://project.dke.maastrichtuniversity.nl/games/files/bsc/Roelofs_Bsc-paper.pdf}, file = {Full Text:/home/nemo/Zotero/storage/QJUD6RDZ/Monte Carlo Tree Search in a Modern Board Game Fra.pdf:application/pdf} } 
@book{pfeiffer_reinforcement_2004, title = {Reinforcement {Learning} of {Strategies} for {Settlers} of {Catan}}, author = {Pfeiffer, Michael}, year = {2004}, file = {Pfeiffer - 2004 - Reinforcement Learning of Strategies for Settlers .pdf:/home/nemo/Zotero/storage/9KJ7QYK4/Pfeiffer - 2004 - Reinforcement Learning of Strategies for Settlers .pdf:application/pdf} } @misc{noauthor_intelligent_nodate, title = {An {Intelligent} {Artificial} {Player} for the {Game} of {Risk}}, url = {http://www.ke.tu-darmstadt.de/lehre/archiv/ss04/oberseminar/folien/Wolf_Michael-Slides.pdf}, file = {An Intelligent Artificial Player for the Game of R.pdf:/home/nemo/Zotero/storage/89MUCUE7/An Intelligent Artificial Player for the Game of R.pdf:application/pdf} } @article{noauthor_risky_nodate, title = {{RISKy} {Business}: {An} {In}-{Depth} {Look} at the {Game} {RISK}}, url = {https://scholar.rose-hulman.edu/rhumj/vol3/iss2/3}, file = {RISKy Business An In-Depth Look at the Game RISK.pdf:/home/nemo/Zotero/storage/PT8CWUJ5/RISKy Business An In-Depth Look at the Game RISK.pdf:application/pdf} } @article{noauthor_risk_nodate, title = {{RISK} {Board} {Game} ‐ {Battle} {Outcome} {Analysis}}, url = {http://www.c4i.gr/xgeorgio/docs/RISK-board-game%20_rev-3.pdf}, file = {RISK Board Game ‐ Battle Outcome Analysis.pdf:/home/nemo/Zotero/storage/IJR85DGR/RISK Board Game ‐ Battle Outcome Analysis.pdf:application/pdf;Full Text:/home/nemo/Zotero/storage/WPYDQ5CF/RISK Board Game ‐ Battle Outcome Analysis.pdf:application/pdf} } @book{olsson_multi-agent_2005, title = {A multi-agent system for playing the board game risk}, author = {Olsson, Fredrik}, year = {2005} } @misc{noauthor_state_nodate, title = {State {Representation} and {Polyomino} {Placement} for the {Game} {Patchwork}}, url = {https://zayenz.se/blog/post/patchwork-modref2019-paper/} } @article{lagerkvist_state_2020, title = {State {Representation} and {Polyomino} {Placement} for the {Game} {Patchwork}}, url = {http://arxiv.org/abs/2001.04233}, 
abstract = {Modern board games are a rich source of entertainment for many people, but also contain interesting and challenging structures for game playing research and implementing game playing agents. This paper studies the game Patchwork, a two player strategy game using polyomino tile drafting and placement. The core polyomino placement mechanic is implemented in a constraint model using regular constraints, extending and improving the model in (Lagerkvist, Pesant, 2008) with: explicit rotation handling; optional placements; and new constraints for resource usage. Crucial for implementing good game playing agents is to have great heuristics for guiding the search when faced with large branching factors. This paper divides placing tiles into two parts: a policy used for placing parts and an evaluation used to select among different placements. Policies are designed based on classical packing literature as well as common standard constraint programming heuristics. For evaluation, global propagation guided regret is introduced, choosing placements based on not ruling out later placements. 
Extensive evaluations are performed, showing the importance of using a good evaluation and that the proposed global propagation guided regret is a very effective guide.}, urldate = {2020-07-21}, journal = {arXiv:2001.04233 [cs]}, author = {Lagerkvist, Mikael Zayenz}, month = jan, year = {2020}, note = {arXiv: 2001.04233}, keywords = {Computer Science - Artificial Intelligence}, annote = {Code: https://github.com/zayenz/cp-mod-ref-2019-patchwork  }, annote = {Comment: In ModRef 2019, The 18th workshop on Constraint Modelling and Reformulation}, file = {arXiv Fulltext PDF:/home/nemo/Zotero/storage/TEV9W4CI/Lagerkvist - 2020 - State Representation and Polyomino Placement for t.pdf:application/pdf;arXiv.org Snapshot:/home/nemo/Zotero/storage/CWN9FKIC/2001.html:text/html} } @misc{noauthor_state_nodate-1, title = {State {Representation} and {Polyomino} {Placement} for the {Game} {Patchwork}}, url = {https://zayenz.se/papers/Lagerkvist_ModRef_2019_Presentation.pdf}, file = {Full Text:/home/nemo/Zotero/storage/JVLQG3BV/State Representation and Polyomino Placement for t.pdf:application/pdf} } @article{lagerkvist_nmbr9_2020, title = {Nmbr9 as a {Constraint} {Programming} {Challenge}}, url = {http://arxiv.org/abs/2001.04238}, abstract = {Modern board games are a rich source of interesting and new challenges for combinatorial problems. The game Nmbr9 is a solitaire style puzzle game using polyominoes. The rules of the game are simple to explain, but modelling the game effectively using constraint programming is hard. This abstract presents the game, contributes new generalized variants of the game suitable for benchmarking and testing, and describes a model for the presented variants. 
The question of the top possible score in the standard game is an open challenge.}, urldate = {2020-07-21}, journal = {arXiv:2001.04238 [cs]}, author = {Lagerkvist, Mikael Zayenz}, month = jan, year = {2020}, note = {arXiv: 2001.04238}, keywords = {Computer Science - Artificial Intelligence}, annote = {Code: https://github.com/zayenz/cp-2019-nmbr9/}, annote = {Comment: Abstract at the 25th International Conference on Principles and Practice of Constraint Programming}, file = {arXiv Fulltext PDF:/home/nemo/Zotero/storage/6YRVYGL7/Lagerkvist - 2020 - Nmbr9 as a Constraint Programming Challenge.pdf:application/pdf;arXiv.org Snapshot:/home/nemo/Zotero/storage/ZP8RUPEW/2001.html:text/html} } @misc{noauthor_nmbr9_nodate, title = {Nmbr9 as a {Constraint} {Programming} {Challenge}}, url = {https://zayenz.se/blog/post/nmbr9-cp2019-abstract/} } @inproceedings{goodman_re-determinizing_2019, title = {Re-determinizing {MCTS} in {Hanabi}}, doi = {10.1109/CIG.2019.8848097}, author = {Goodman, James}, year = {2019}, pages = {1--8}, file = {Goodman - 2019 - Re-determinizing MCTS in Hanabi.pdf:/home/nemo/Zotero/storage/MQ8AF9RF/Goodman - 2019 - Re-determinizing MCTS in Hanabi.pdf:application/pdf} } @inproceedings{canaan_evolving_2018, address = {Maastricht}, title = {Evolving {Agents} for the {Hanabi} 2018 {CIG} {Competition}}, isbn = {978-1-5386-4359-4}, url = {https://ieeexplore.ieee.org/document/8490449/}, doi = {10.1109/CIG.2018.8490449}, urldate = {2020-07-21}, booktitle = {2018 {IEEE} {Conference} on {Computational} {Intelligence} and {Games} ({CIG})}, publisher = {IEEE}, author = {Canaan, Rodrigo and Shen, Haotian and Torrado, Ruben and Togelius, Julian and Nealen, Andy and Menzel, Stefan}, month = aug, year = {2018}, pages = {1--8}, file = {Submitted Version:/home/nemo/Zotero/storage/XP6SKHQI/Canaan et al. 
- 2018 - Evolving Agents for the Hanabi 2018 CIG Competitio.pdf:application/pdf} } @incollection{bosse_aspects_2017, address = {Cham}, title = {Aspects of the {Cooperative} {Card} {Game} {Hanabi}}, volume = {765}, isbn = {978-3-319-67467-4 978-3-319-67468-1}, url = {http://link.springer.com/10.1007/978-3-319-67468-1_7}, urldate = {2020-07-21}, booktitle = {{BNAIC} 2016: {Artificial} {Intelligence}}, publisher = {Springer International Publishing}, author = {van den Bergh, Mark J. H. and Hommelberg, Anne and Kosters, Walter A. and Spieksma, Flora M.}, editor = {Bosse, Tibor and Bredeweg, Bert}, year = {2017}, doi = {10.1007/978-3-319-67468-1_7}, note = {Series Title: Communications in Computer and Information Science}, pages = {93--105}, file = {Full Text:/home/nemo/Zotero/storage/6TLZ7TUH/van den Bergh et al. - 2017 - Aspects of the Cooperative Card Game Hanabi.pdf:application/pdf} } @incollection{winands_playing_2017, address = {Cham}, title = {Playing {Hanabi} {Near}-{Optimally}}, volume = {10664}, isbn = {978-3-319-71648-0 978-3-319-71649-7}, url = {http://link.springer.com/10.1007/978-3-319-71649-7_5}, urldate = {2020-07-21}, booktitle = {Advances in {Computer} {Games}}, publisher = {Springer International Publishing}, author = {Bouzy, Bruno}, editor = {Winands, Mark H.M. and van den Herik, H. 
Jaap and Kosters, Walter A.}, year = {2017}, doi = {10.1007/978-3-319-71649-7_5}, note = {Series Title: Lecture Notes in Computer Science}, pages = {51--62} } @inproceedings{eger_intentional_2017, address = {New York, NY, USA}, title = {An intentional {AI} for hanabi}, isbn = {978-1-5386-3233-8}, url = {http://ieeexplore.ieee.org/document/8080417/}, doi = {10.1109/CIG.2017.8080417}, urldate = {2020-07-21}, booktitle = {2017 {IEEE} {Conference} on {Computational} {Intelligence} and {Games} ({CIG})}, publisher = {IEEE}, author = {Eger, Markus and Martens, Chris and Cordoba, Marcela Alfaro}, month = aug, year = {2017}, pages = {68--75}, file = {Full Text:/home/nemo/Zotero/storage/E3H565Y9/Eger et al. - 2017 - An intentional AI for hanabi.pdf:application/pdf} } @inproceedings{osawa_solving_2015, title = {Solving {Hanabi}: {Estimating} {Hands} by {Opponent}'s {Actions} in {Cooperative} {Game} with {Incomplete} {Information}}, url = {https://aaai.org/ocs/index.php/WS/AAAIW15/paper/view/10167}, abstract = {A unique behavior of humans is modifying one’s unobservable behavior based on the reaction of others for cooperation. We used a card game called Hanabi as an evaluation task of imitating human reflective intelligence with artificial intelligence. Hanabi is a cooperative card game with incomplete information. A player cooperates with an opponent in building several card sets constructed with the same color and ordered numbers. However, like a blind man's bluff, each player sees the cards of all other players except his/her own. Also, communication between players is restricted to information about the same numbers and colors, and the player is required to read his/his opponent's intention with the opponent's hand, estimate his/her cards with incomplete information, and play one of them for building a set. We compared human play with several simulated strategies. 
The results indicate that the strategy with feedbacks from simulated opponent's viewpoints achieves more score than other strategies.}, author = {Osawa, Hirotaka}, year = {2015}, file = {Osawa - 2015 - Solving Hanabi Estimating Hands by Opponent's Act.pdf:/home/nemo/Zotero/storage/7TRVJGUC/Osawa - 2015 - Solving Hanabi Estimating Hands by Opponent's Act.pdf:application/pdf} } @article{eger_browser-based_2017, title = {A {Browser}-based {Interface} for the {Exploration} and {Evaluation} of {Hanabi} {AIs}}, url = {http://fdg2017.org/papers/FDG2017_demo_Hanabi.pdf}, language = {en}, journal = {Cape Cod}, author = {Eger, Markus and Martens, Chris}, year = {2017}, pages = {4}, annote = {URL: http://fdg2017.org/papers/FDG2017\_demo\_Hanabi.pdf}, file = {Eger and Martens - 2017 - A Browser-based Interface for the Exploration and .pdf:/home/nemo/Zotero/storage/RE7PCTMZ/Eger and Martens - 2017 - A Browser-based Interface for the Exploration and .pdf:application/pdf} } @article{gottwald_i_nodate, title = {I see what you see: {Integrating} eye tracking into {Hanabi} playing agents}, abstract = {Humans’ eye movements convey a lot of information about their intentions, often unconsciously. Intelligent agents that cooperate with humans in various domains can benefit from interpreting this information. This paper contains a preliminary look at how eye tracking could be useful for agents that play the cooperative card game Hanabi with human players. We outline several situations in which an AI agent can utilize gaze information, and present an outlook on how we plan to integrate this with reimplementations of contemporary Hanabi agents.}, language = {en}, author = {Gottwald, Eva Tallula and Eger, Markus and Martens, Chris}, pages = {4}, annote = {URL: http://www.exag.org/wp-content/uploads/2018/10/AIIDE-18\_Upload\_112.pdf  }, file = {Gottwald et al. - I see what you see Integrating eye tracking into .pdf:/home/nemo/Zotero/storage/5STNIF33/Gottwald et al. 
- I see what you see Integrating eye tracking into .pdf:application/pdf} } @misc{noauthor_state_nodate-2, title = {State of the art {Hanabi} bots + simulation framework in rust}, url = {https://github.com/WuTheFWasThat/hanabi.rs} } @misc{noauthor_strategy_nodate, title = {A strategy simulator for the well-known cooperative card game {Hanabi}}, url = {https://github.com/rjtobin/HanSim} } @misc{noauthor_framework_nodate, title = {A framework for writing bots that play {Hanabi}}, url = {https://github.com/Quuxplusone/Hanabi} } @article{dehaan_jidoukan_2020, series = {Ludic {Language} {Pedagogy}}, title = {Jidoukan {Jenga}: {Teaching} {English} through remixing games and game rules}, shorttitle = {Teaching {English} through remixing games and game rules}, url = {https://www.llpjournal.org/2020/04/13/jidokan-jenga.html}, abstract = {Let students play simple games in their L1. It’s ok! Then: You, the teacher, can help them critique the game in their L2. You, the teacher, can help them change the game in their L2. You, the teacher, can help them develop themselves. \#dropthestick \#dropthecarrot \#bringmeaning}, journal = {Ludic Language Pedagogy}, author = {deHaan, Jonathan}, month = apr, year = {2020}, note = {📍 What is this? This is a recollection of a short lesson with some children. I used Jenga and a dictionary. 📍 Why did you make it? I want to show language teachers that simple games, and playing simple games in students’ first language can be a great foundation for helping students learn new vocabulary, think critically, and exercise creativity. 📍 Why is it radical? I taught using a simple board game (at a time when video games are over-focused on in research). I show what the learning looks like (I include a photo). The teaching and learning didn’t occur in a laboratory setting, but in the wild (in a community center). I focused on the learning around games. 📍 Who is it for? Language teachers can easily implement this lesson using Jenga or any other game. 
Language researchers can expand on the translating and remixing potential around games.}, file = {deHaan - 2020 - Jidoukan Jenga Teaching English through remixing .pdf:/home/nemo/Zotero/storage/9B6YJUWQ/deHaan - 2020 - Jidoukan Jenga Teaching English through remixing .pdf:application/pdf} } @article{heron_meeple_2018, title = {Meeple {Centred} {Design}: {A} {Heuristic} {Toolkit} for {Evaluating} the {Accessibility} of {Tabletop} {Games}}, volume = {7}, issn = {2052-773X}, shorttitle = {Meeple {Centred} {Design}}, url = {http://link.springer.com/10.1007/s40869-018-0057-8}, doi = {10.1007/s40869-018-0057-8}, language = {en}, number = {2}, urldate = {2020-07-28}, journal = {The Computer Games Journal}, author = {Heron, Michael James and Belford, Pauline Helen and Reid, Hayley and Crabb, Michael}, month = jun, year = {2018}, pages = {97--114}, file = {Full Text:/home/nemo/Zotero/storage/A6WJQYW2/Heron et al. - 2018 - Meeple Centred Design A Heuristic Toolkit for Eva.pdf:application/pdf} } @article{heron_eighteen_2018, title = {Eighteen {Months} of {Meeple} {Like} {Us}: {An} {Exploration} into the {State} of {Board} {Game} {Accessibility}}, volume = {7}, issn = {2052-773X}, shorttitle = {Eighteen {Months} of {Meeple} {Like} {Us}}, url = {http://link.springer.com/10.1007/s40869-018-0056-9}, doi = {10.1007/s40869-018-0056-9}, language = {en}, number = {2}, urldate = {2020-07-28}, journal = {The Computer Games Journal}, author = {Heron, Michael James and Belford, Pauline Helen and Reid, Hayley and Crabb, Michael}, month = jun, year = {2018}, pages = {75--95}, file = {Full Text:/home/nemo/Zotero/storage/B3NFVIMW/Heron et al. 
- 2018 - Eighteen Months of Meeple Like Us An Exploration .pdf:application/pdf} } @phdthesis{andel_complexity_2020, type = {Bachelor thesis}, title = {On the complexity of {Hive}}, shorttitle = {On the complexity of {Hive}}, url = {https://dspace.library.uu.nl/handle/1874/396955}, abstract = {It is shown that for an arbitrary position of a Hive game where both players have the same set of N pieces it is PSPACE-hard to determine whether one of the players has a winning strategy. The proof is done by reducing the known PSPACE-complete set of true quantified boolean formulas to a game concerning these formulas, then to the game generalised geography, then to a version of that game with the restriction of having only nodes with maximum degree 3, and finally to generalised Hive. This thesis includes a short introduction to the subject of computational complexity.}, language = {en-US}, school = {Utrecht University}, author = {Andel, Daniël}, month = may, year = {2020}, file = {Andel - 2020 - On the complexity of Hive.pdf:/home/nemo/Zotero/storage/5TWTM295/Andel - 2020 - On the complexity of Hive.pdf:application/pdf} } @article{kunda_creative_2020, title = {Creative {Captioning}: {An} {AI} {Grand} {Challenge} {Based} on the {Dixit} {Board} {Game}}, shorttitle = {Creative {Captioning}}, url = {http://arxiv.org/abs/2010.00048}, abstract = {We propose a new class of "grand challenge" AI problems that we call creative captioning---generating clever, interesting, or abstract captions for images, as well as understanding such captions. Creative captioning draws on core AI research areas of vision, natural language processing, narrative reasoning, and social reasoning, and across all these areas, it requires sophisticated uses of common sense and cultural knowledge. In this paper, we analyze several specific research problems that fall under creative captioning, using the popular board game Dixit as both inspiration and proposed testing ground. 
We expect that Dixit could serve as an engaging and motivating benchmark for creative captioning across numerous AI research communities for the coming 1-2 decades.}, urldate = {2020-10-12}, journal = {arXiv:2010.00048 [cs]}, author = {Kunda, Maithilee and Rabkina, Irina}, month = sep, year = {2020}, note = {arXiv: 2010.00048}, keywords = {Computer Science - Artificial Intelligence}, file = {arXiv Fulltext PDF:/home/nemo/Zotero/storage/8VJ5WNFQ/Kunda and Rabkina - 2020 - Creative Captioning An AI Grand Challenge Based o.pdf:application/pdf;arXiv.org Snapshot:/home/nemo/Zotero/storage/Y9MEAFJC/2010.html:text/html} } @misc{noauthor_shobu_nodate, title = {Shobu {AI} {Playground}}, url = {https://github.com/JayWalker512/Shobu} } @misc{noauthor_shobu_nodate-1, title = {Shobu randomly played games dataset}, url = {https://www.kaggle.com/bsfoltz/shobu-randomly-played-games-104k} } @inproceedings{de_mesentier_silva_ai-based_2017, address = {Hyannis, Massachusetts}, title = {{AI}-based playtesting of contemporary board games}, isbn = {978-1-4503-5319-9}, url = {http://dl.acm.org/citation.cfm?doid=3102071.3102105}, doi = {10.1145/3102071.3102105}, language = {en}, urldate = {2020-10-12}, booktitle = {Proceedings of the {International} {Conference} on the {Foundations} of {Digital} {Games} - {FDG} '17}, publisher = {ACM Press}, author = {de Mesentier Silva, Fernando and Lee, Scott and Togelius, Julian and Nealen, Andy}, year = {2017}, pages = {1--10}, file = {Full Text:/home/nemo/Zotero/storage/BYYCGVG7/de Mesentier Silva et al. 
- 2017 - AI-based playtesting of contemporary board games.pdf:application/pdf} } @misc{copley_materials_nodate, title = {Materials for {Ticket} to {Ride} {Seattle} and a framework for making more game boards}, url = {https://github.com/dovinmu/ttr_generator}, author = {Copley, Rowan} } @techreport{nguyen_httpswwweecstuftsedujsinapovteachingcomp150_rlreportsnguyen_dinjian_reportpdf_nodate, title = {https://www.eecs.tufts.edu/{\textasciitilde}jsinapov/teaching/comp150\_RL/reports/{Nguyen}\_Dinjian\_report.pdf}, url = {https://www.eecs.tufts.edu/~jsinapov/teaching/comp150_RL/reports/Nguyen_Dinjian_report.pdf}, abstract = {Ticket to Ride is a very popular, award-winning board-game where you try toscore the most points while building a railway spanning cities in America. For acomputer to learn to play this game is very difficult due to the vast state-actionspace. This project will explain why featurizing your state, and implementingcurriculum learning can help agents learn as state-action spaces grow too largefor traditional learning methods to be effective.}, author = {Nguyen, Cuong and Dinjian, Daniel} } @inproceedings{de_mesentier_silva_evolving_2018, address = {Malmö Sweden}, title = {Evolving maps and decks for ticket to ride}, isbn = {978-1-4503-6571-0}, url = {https://dl.acm.org/doi/10.1145/3235765.3235813}, doi = {10.1145/3235765.3235813}, language = {en}, urldate = {2020-10-12}, booktitle = {Proceedings of the 13th {International} {Conference} on the {Foundations} of {Digital} {Games}}, publisher = {ACM}, author = {de Mesentier Silva, Fernando and Lee, Scott and Togelius, Julian and Nealen, Andy}, month = aug, year = {2018}, pages = {1--7}, file = {Full Text:/home/nemo/Zotero/storage/LRU3P3CX/de Mesentier Silva et al. 
- 2018 - Evolving maps and decks for ticket to ride.pdf:application/pdf} } @misc{witter_applications_nodate, title = {Applications of {Graph} {Theory} and {Probability} in the {Board} {Game} {Ticket} to {Ride}}, url = {https://www.rtealwitter.com/slides/2020-JMM.pdf}, author = {Witter, R. Teal and Lyford, Alex} } @article{gendre_playing_2020, title = {Playing {Catan} with {Cross}-dimensional {Neural} {Network}}, url = {http://arxiv.org/abs/2008.07079}, abstract = {Catan is a strategic board game having interesting properties, including multi-player, imperfect information, stochastic, complex state space structure (hexagonal board where each vertex, edge and face has its own features, cards for each player, etc), and a large action space (including negotiation). Therefore, it is challenging to build AI agents by Reinforcement Learning (RL for short), without domain knowledge nor heuristics. In this paper, we introduce cross-dimensional neural networks to handle a mixture of information sources and a wide variety of outputs, and empirically demonstrate that the network dramatically improves RL in Catan. 
We also show that, for the first time, a RL agent can outperform jsettler, the best heuristic agent available.}, urldate = {2020-10-12}, journal = {arXiv:2008.07079 [cs, stat]}, author = {Gendre, Quentin and Kaneko, Tomoyuki}, month = aug, year = {2020}, note = {arXiv: 2008.07079}, keywords = {Computer Science - Artificial Intelligence, Computer Science - Machine Learning, Statistics - Machine Learning}, annote = {Comment: 12 pages, 5 tables and 10 figures; submitted to the ICONIP 2020}, file = {arXiv Fulltext PDF:/home/nemo/Zotero/storage/AU6NYDIV/Gendre and Kaneko - 2020 - Playing Catan with Cross-dimensional Neural Networ.pdf:application/pdf;arXiv.org Snapshot:/home/nemo/Zotero/storage/NKRW6UKC/2008.html:text/html} } @inproceedings{theodoridis_monte_2020, address = {Athens Greece}, title = {Monte {Carlo} {Tree} {Search} for the {Game} of {Diplomacy}}, isbn = {978-1-4503-8878-8}, url = {https://dl.acm.org/doi/10.1145/3411408.3411413}, doi = {10.1145/3411408.3411413}, language = {en}, urldate = {2020-10-12}, booktitle = {11th {Hellenic} {Conference} on {Artificial} {Intelligence}}, publisher = {ACM}, author = {Theodoridis, Alexios and Chalkiadakis, Georgios}, month = sep, year = {2020}, pages = {16--25} } @article{eger_operationalizing_2020, title = {Operationalizing {Intentionality} to {Play} {Hanabi} with {Human} {Players}}, issn = {2475-1502, 2475-1510}, url = {https://ieeexplore.ieee.org/document/9140404/}, doi = {10.1109/TG.2020.3009359}, urldate = {2020-11-26}, journal = {IEEE Transactions on Games}, author = {Eger, Markus and Martens, Chris and Sauma Chacon, Pablo and Alfaro Cordoba, Marcela and Hidalgo Cespedes, Jeisson}, year = {2020}, pages = {1--1}, file = {Full Text:/home/nemo/Zotero/storage/V2M3QSJG/Eger et al. 
- 2020 - Operationalizing Intentionality to Play Hanabi wit.pdf:application/pdf} } @article{canaan_behavioral_2020, title = {Behavioral {Evaluation} of {Hanabi} {Rainbow} {DQN} {Agents} and {Rule}-{Based} {Agents}}, volume = {16}, url = {https://ojs.aaai.org/index.php/AIIDE/article/view/7404}, abstract = {Hanabi is a multiplayer cooperative card game, where only your partners know your cards. All players succeed or fail together. This makes the game an excellent testbed for studying collaboration. Recently, it has been shown that deep neural networks can be trained through self-play to play the game very well. However, such agents generally do not play well with others. In this paper, we investigate the consequences of training Rainbow DQN agents with human-inspired rule-based agents. We analyze with which agents Rainbow agents learn to play well, and how well playing skill transfers to agents they were not trained with. We also analyze patterns of communication between agents to elucidate how collaboration happens. A key finding is that while most agents only learn to play well with partners seen during training, one particular agent leads the Rainbow algorithm towards a much more general policy. 
The metrics and hypotheses advanced in this paper can be used for further study of collaborative agents.}, number = {1}, urldate = {2020-11-26}, journal = {Proceedings of the AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment}, author = {Canaan, Rodrigo and Gao, Xianbo and Chung, Youjin and Togelius, Julian and Nealen, Andy and Menzel, Stefan}, month = oct, year = {2020}, note = {Section: Full Oral Papers}, pages = {31--37} } @inproceedings{_playing_2020, title = {Playing mini-{Hanabi} card game with {Q}-learning}, volume = {2020}, url = {http://id.nii.ac.jp/1001/00205046/}, booktitle = {第82回全国大会講演論文集}, author = {ひい, とう and 市来, 正裕 and 中里, 研一}, month = feb, year = {2020}, note = {Issue: 1}, pages = {41--42} } @article{reinhardt_competing_2020, title = {Competing in a {Complex} {Hidden} {Role} {Game} with {Information} {Set} {Monte} {Carlo} {Tree} {Search}}, url = {http://arxiv.org/abs/2005.07156}, abstract = {Advances in intelligent game playing agents have led to successes in perfect information games like Go and imperfect information games like Poker. The Information Set Monte Carlo Tree Search (ISMCTS) family of algorithms outperforms previous algorithms using Monte Carlo methods in imperfect information games. In this paper, Single Observer Information Set Monte Carlo Tree Search (SO-ISMCTS) is applied to Secret Hitler, a popular social deduction board game that combines traditional hidden role mechanics with the randomness of a card deck. This combination leads to a more complex information model than the hidden role and card deck mechanics alone. 
It is shown in 10108 simulated games that SO-ISMCTS plays as well as simpler rule based agents, and demonstrates the potential of ISMCTS algorithms in complicated information set domains.}, urldate = {2020-11-26}, journal = {arXiv:2005.07156 [cs]}, author = {Reinhardt, Jack}, month = may, year = {2020}, note = {arXiv: 2005.07156}, keywords = {Computer Science - Artificial Intelligence, Computer Science - Multiagent Systems}, file = {arXiv Fulltext PDF:/home/nemo/Zotero/storage/D7TPSJ4Q/Reinhardt - 2020 - Competing in a Complex Hidden Role Game with Infor.pdf:application/pdf;arXiv.org Snapshot:/home/nemo/Zotero/storage/TZ64EN6T/2005.html:text/html} } @article{ameneyro_playing_2020, title = {Playing {Carcassonne} with {Monte} {Carlo} {Tree} {Search}}, url = {http://arxiv.org/abs/2009.12974}, abstract = {Monte Carlo Tree Search (MCTS) is a relatively new sampling method with multiple variants in the literature. They can be applied to a wide variety of challenging domains including board games, video games, and energy-based problems to mention a few. In this work, we explore the use of the vanilla MCTS and the MCTS with Rapid Action Value Estimation (MCTS-RAVE) in the game of Carcassonne, a stochastic game with a deceptive scoring system where limited research has been conducted. We compare the strengths of the MCTS-based methods with the Star2.5 algorithm, previously reported to yield competitive results in the game of Carcassonne when a domain-specific heuristic is used to evaluate the game states. We analyse the particularities of the strategies adopted by the algorithms when they share a common reward system. 
The MCTS-based methods consistently outperformed the Star2.5 algorithm given their ability to find and follow long-term strategies, with the vanilla MCTS exhibiting a more robust game-play than the MCTS-RAVE.}, urldate = {2021-01-02}, journal = {arXiv:2009.12974 [cs]}, author = {Ameneyro, Fred Valdez and Galvan, Edgar and Morales, Anger Fernando Kuri}, month = oct, year = {2020}, note = {arXiv: 2009.12974}, keywords = {Computer Science - Artificial Intelligence}, annote = {Comment: 8 pages, 6 figures}, file = {arXiv Fulltext PDF:/home/nemo/Zotero/storage/KWUZF6UF/Ameneyro et al. - 2020 - Playing Carcassonne with Monte Carlo Tree Search.pdf:application/pdf;arXiv.org Snapshot:/home/nemo/Zotero/storage/KGFBVHU7/2009.html:text/html} } @article{tanaka_quixo_2020, title = {Quixo {Is} {Solved}}, url = {http://arxiv.org/abs/2007.15895}, abstract = {Quixo is a two-player game played on a 5\${\textbackslash}times\$5 grid where the players try to align five identical symbols. Specifics of the game require the usage of novel techniques. Using a combination of value iteration and backward induction, we propose the first complete analysis of the game. We describe memory-efficient data structures and algorithmic optimizations that make the game solvable within reasonable time and space constraints. Our main conclusion is that Quixo is a Draw game. The paper also contains the analysis of smaller boards and presents some interesting states extracted from our computations.}, urldate = {2021-01-02}, journal = {arXiv:2007.15895 [cs]}, author = {Tanaka, Satoshi and Bonnet, François and Tixeuil, Sébastien and Tamura, Yasumasa}, month = jul, year = {2020}, note = {arXiv: 2007.15895}, keywords = {Computer Science - Computer Science and Game Theory}, annote = {Comment: 19 pages}, file = {arXiv Fulltext PDF:/home/nemo/Zotero/storage/ENGW8PNA/Tanaka et al. 
- 2020 - Quixo Is Solved.pdf:application/pdf;arXiv.org Snapshot:/home/nemo/Zotero/storage/YZIUUDN9/2007.html:text/html} } @article{bertholon_at_2020, title = {At {Most} 43 {Moves}, {At} {Least} 29: {Optimal} {Strategies} and {Bounds} for {Ultimate} {Tic}-{Tac}-{Toe}}, shorttitle = {At {Most} 43 {Moves}, {At} {Least} 29}, url = {http://arxiv.org/abs/2006.02353}, abstract = {Ultimate Tic-Tac-Toe is a variant of the well known tic-tac-toe (noughts and crosses) board game. Two players compete to win three aligned "fields", each of them being a tic-tac-toe game. Each move determines which field the next player must play in. We show that there exist a winning strategy for the first player, and therefore that there exist an optimal winning strategy taking at most 43 moves; that the second player can hold on at least 29 rounds; and identify any optimal strategy's first two moves.}, urldate = {2021-01-02}, journal = {arXiv:2006.02353 [cs]}, author = {Bertholon, Guillaume and Géraud-Stewart, Rémi and Kugelmann, Axel and Lenoir, Théo and Naccache, David}, month = jun, year = {2020}, note = {arXiv: 2006.02353}, keywords = {Computer Science - Computer Science and Game Theory}, file = {arXiv Fulltext PDF:/home/nemo/Zotero/storage/USYULUK5/Bertholon et al. - 2020 - At Most 43 Moves, At Least 29 Optimal Strategies .pdf:application/pdf;arXiv.org Snapshot:/home/nemo/Zotero/storage/FWCEA7V4/2006.html:text/html} } @article{muller-brockhausen_new_2020, title = {A {New} {Challenge}: {Approaching} {Tetris} {Link} with {AI}}, shorttitle = {A {New} {Challenge}}, url = {http://arxiv.org/abs/2004.00377}, abstract = {Decades of research have been invested in making computer programs for playing games such as Chess and Go. This paper focuses on a new game, Tetris Link, a board game that is still lacking any scientific analysis. Tetris Link has a large branching factor, hampering a traditional heuristic planning approach. 
We explore heuristic planning and two other approaches: Reinforcement Learning, Monte Carlo tree search. We document our approach and report on their relative performance in a tournament. Curiously, the heuristic approach is stronger than the planning/learning approaches. However, experienced human players easily win the majority of the matches against the heuristic planning AIs. We, therefore, surmise that Tetris Link is more difficult than expected. We offer our findings to the community as a challenge to improve upon.}, urldate = {2021-01-02}, journal = {arXiv:2004.00377 [cs]}, author = {Muller-Brockhausen, Matthias and Preuss, Mike and Plaat, Aske}, month = apr, year = {2020}, note = {arXiv: 2004.00377}, keywords = {Computer Science - Artificial Intelligence}, file = {arXiv Fulltext PDF:/home/nemo/Zotero/storage/CJNXCN3A/Muller-Brockhausen et al. - 2020 - A New Challenge Approaching Tetris Link with AI.pdf:application/pdf;arXiv.org Snapshot:/home/nemo/Zotero/storage/4NNBCTUY/2004.html:text/html} }