booktitle = {2014 {IEEE} {Conference} on {Computational} {Intelligence} and {Games}},
publisher = {IEEE},
author = {Guhe, Markus and Lascarides, Alex},
month = aug,
year = {2014},
pages = {1--8},
file = {Submitted Version:/home/nemo/Zotero/storage/ITK52TEL/Guhe and Lascarides - 2014 - The effectiveness of persuasion in The Settlers of.pdf:application/pdf;Submitted Version:/home/nemo/Zotero/storage/HTNFAWSA/Guhe and Lascarides - 2014 - The effectiveness of persuasion in The Settlers of.pdf:application/pdf}
}
@inproceedings{guhe_effectiveness_2014-1,
  title = {The effectiveness of persuasion in {The} {Settlers} of {Catan}},
  booktitle = {2014 {IEEE} {Conference} on {Computational} {Intelligence} and {Games}},
  publisher = {IEEE},
  author = {Guhe, Markus and Lascarides, Alex},
  month = aug,
  year = {2014},
  pages = {1--8},
  file = {Submitted Version:/home/nemo/Zotero/storage/WJWTC8A9/Guhe and Lascarides - 2014 - The effectiveness of persuasion in The Settlers of.pdf:application/pdf},
  internal-note = {the "-1" key suffix marks a probable Zotero duplicate of an earlier entry for the same paper; pages copied from that sibling entry --- consider merging the two}
}
@article{boda_avoiding_2018,
  title = {Avoiding {Revenge} {Using} {Optimal} {Opponent} {Ranking} {Strategy} in the {Board} {Game} {Catan}},
  journal = {International Journal of Computer Games Technology},
  author = {Boda, M{\'a}rton Attila},
  year = {2018},
  internal-note = {corrupted export: the original entry carried booktitle/author/month/file copied verbatim from an unrelated 2014 Guhe and Lascarides entry; author and journal reconstructed from the citation key and title --- verify against the published article and re-add doi/volume/pages}
}
@incollection{szita_monte-carlo_2010,
  title = {Monte-{Carlo} {Tree} {Search} in {Settlers} of {Catan}},
  author = {Szita, Istv{\'a}n and Chaslot, Guillaume and Spronck, Pieter},
  year = {2010},
  internal-note = {corrupted export: the original entry mixed this title with the author/doi/pages/file of Xenou et al. 2019 (split out below); authors reconstructed from the citation key --- verify booktitle/publisher/pages}
}
@incollection{xenou_deep_2019,
  title = {Deep {Reinforcement} {Learning} in {Strategic} {Board} {Game} {Environments}},
  author = {Xenou, Konstantia and Chalkiadakis, Georgios and Afantenos, Stergos},
  year = {2019},
  doi = {10.1007/978-3-030-14174-5_16},
  pages = {233--248},
  file = {Accepted Version:/home/nemo/Zotero/storage/MXL35B77/Xenou et al. - 2019 - Deep Reinforcement Learning in Strategic Board Gam.pdf:application/pdf},
  internal-note = {title taken from the attached PDF filename --- verify booktitle/publisher}
}
@article{maliphant_mini-risk_1990,
  title = {Mini-{Risk}: {Strategies} for a {Simplified} {Board} {Game}},
  volume = {41},
  doi = {10.1057/jors.1990.2},
  number = {1},
  journal = {Journal of the Operational Research Society},
  author = {Maliphant, Sarah A. and Smith, David K.},
  month = jan,
  year = {1990},
  note = {Publisher: Informa UK Limited},
  pages = {9--16},
  file = {Full Text:/home/nemo/Zotero/storage/X6ZHBQT8/Maliphant and Smith - 1990 - Mini-Risk Strategies for a Simplified Board Game.pdf:application/pdf}
}
@inproceedings{neves_learning_2002,
  title = {Learning the risk board game with classifier systems},
  doi = {10.1145/508791.508904},
  booktitle = {Proceedings of the 2002 {ACM} symposium on {Applied} computing - {SAC} '02},
  publisher = {ACM Press},
  author = {Neves, Atila and Bras{\~a}o, Osvaldo and Rosa, Agostinho},
  year = {2002},
  file = {Full Text:/home/nemo/Zotero/storage/V8H6XI4Y/Neves et al. - 2002 - Learning the risk board game with classifier syste.pdf:application/pdf},
  internal-note = {booktitle had double-escaped textquotesingle garbage, now "SAC '02"; "Brasāo" assumed to be a mis-encoding of the Portuguese "Brasão" --- verify}
}
@article{tan_markov_1997,
  title = {Markov {Chains} and the {RISK} {Board} {Game}},
  journal = {Mathematics Magazine},
  volume = {70},
  number = {5},
  pages = {349--356},
  author = {Tan, Bar{\i}{\c{s}}},
  year = {1997},
  internal-note = {corrupted export: the original entry had three unrelated entries fused into it (Bard et al. 2020, Canaan et al. 2019, Cowling et al. 2012), split out below; author/journal/volume/pages reconstructed --- verify}
}
@article{bard_hanabi_2020,
  title = {The {Hanabi} challenge: {A} new frontier for {AI} research},
  journal = {Artificial Intelligence},
  volume = {280},
  author = {Bard, Nolan and Foerster, Jakob N. and Chandar, Sarath and Burch, Neil and Lanctot, Marc and Song, H. Francis and Parisotto, Emilio and Dumoulin, Vincent and Moitra, Subhodeep and Hughes, Edward and Dunning, Iain and Mourad, Shibl and Larochelle, Hugo and Bellemare, Marc G. and Bowling, Michael},
  month = mar,
  year = {2020},
  note = {Publisher: Elsevier BV},
  pages = {103216},
  file = {Full Text:/home/nemo/Zotero/storage/QK4PLTNC/Bard et al. - 2020 - The Hanabi challenge A new frontier for AI resear.pdf:application/pdf},
  internal-note = {title completed from the attached PDF filename; journal/volume inferred from the Elsevier publisher note and article-number pages --- verify}
}
@inproceedings{canaan_diverse_2019,
  title = {Diverse {Agents} for {Ad}-{Hoc} {Cooperation} in {Hanabi}},
  booktitle = {2019 {IEEE} {Conference} on {Games} ({CoG})},
  publisher = {IEEE},
  author = {Canaan, Rodrigo and Togelius, Julian and Nealen, Andy and Menzel, Stefan},
  month = aug,
  year = {2019},
  file = {Submitted Version:/home/nemo/Zotero/storage/9WT5YA3E/Canaan et al. - 2019 - Diverse Agents for Ad-Hoc Cooperation in Hanabi.pdf:application/pdf},
  internal-note = {title taken from the attached PDF filename}
}
@article{cowling_ensemble_2012,
  title = {Ensemble {Determinization} in {Monte} {Carlo} {Tree} {Search} for the {Imperfect} {Information} {Card} {Game} {Magic}: {The} {Gathering}},
  journal = {IEEE Transactions on Computational Intelligence and AI in Games},
  author = {Cowling, Peter I. and Ward, Colin D. and Powley, Edward J.},
  month = dec,
  year = {2012},
  note = {Publisher: Institute of Electrical and Electronics Engineers (IEEE)},
  pages = {241--257},
  file = {Accepted Version:/home/nemo/Zotero/storage/JI5MQ857/Cowling et al. - 2012 - Ensemble Determinization in Monte Carlo Tree Searc.pdf:application/pdf},
  internal-note = {title completed from the truncated PDF filename; journal inferred from the IEEE publisher note --- verify volume/number}
}
@article{bosch_optimal_2000,
  title = {Optimal {Card}-{Collecting} {Strategies} for {Magic}: {The} {Gathering}},
  author = {Bosch, Robert A.},
  year = {2000},
  internal-note = {corrupted export: the original entry had two unrelated works fused into it (Woolford and Watson 2017; Maynard et al. 2001), split out below; author/year reconstructed from the citation key --- verify journal/volume/pages}
}
@inproceedings{woolford_scout_2017,
  title = {{SCOUT}: {A} {Case}-{Based} {Reasoning} {Agent} for {Playing} {Race} for the {Galaxy}},
  author = {Woolford, Michael and Watson, Ian},
  year = {2017},
  file = {Woolford and Watson - 2017 - SCOUT A Case-Based Reasoning Agent for Playing Ra.pdf:/home/nemo/Zotero/storage/LMIXD5XY/Woolford and Watson - 2017 - SCOUT A Case-Based Reasoning Agent for Playing Ra.pdf:application/pdf},
  internal-note = {entry reconstructed from the attached PDF filename; verify authors' given names and venue}
}
@article{maynard_modeling_2001,
  title = {Modeling expert problem solving in a game of chance},
  author = {Maynard, Ken and Moss, Patrick and Whitehead, Marcus and Narayanan, S. and Garay, Matt and Brannon, Nathan and Kantamneni, Raj Gopal and Kustra, Todd},
  month = may,
  year = {2001},
  note = {Publisher: Wiley},
  pages = {88--98},
  file = {Full Text:/home/nemo/Zotero/storage/PG6NUX5X/Maynard et al. - 2001 - Modeling expert problem solving in a game of chanc.pdf:application/pdf},
  internal-note = {title taken from the truncated PDF filename ("chanc" completed to "chance"); journal missing --- verify}
}
@incollection{oka_systematic_2016,
  title = {Systematic {Selection} of {N}-{Tuple} {Networks} for 2048},
  author = {Oka, Kazuto and Matsuzaki, Kiminori},
  year = {2016},
  internal-note = {corrupted export: the original entry mixed this title with the booktitle/author/file of Rodgers and Levine 2014 (split out below); authors/year reconstructed from the citation key --- verify booktitle/publisher/pages}
}
@inproceedings{rodgers_investigation_2014,
  title = {An investigation into 2048 {AI} strategies},
  booktitle = {2014 {IEEE} {Conference} on {Computational} {Intelligence} and {Games}},
  publisher = {IEEE},
  author = {Rodgers, Philip and Levine, John},
  month = aug,
  year = {2014},
  file = {Full Text:/home/nemo/Zotero/storage/GWVBHIAP/Rodgers and Levine - 2014 - An investigation into 2048 AI strategies.pdf:application/pdf},
  internal-note = {title taken from the attached PDF filename}
}
@article{anthony_learning_2020,
  title = {Learning to {Play} {No}-{Press} {Diplomacy} with {Best} {Response} {Policy} {Iteration}},
  url = {http://arxiv.org/abs/2006.04635v2},
  journal = {arxiv:2006.04635},
  eprint = {2006.04635},
  eprinttype = {arXiv},
  author = {Anthony, Thomas and Eccles, Tom and Tacchetti, Andrea and Kramár, János and Gemp, Ian and Hudson, Thomas C. and Porcel, Nicolas and Lanctot, Marc and Pérolat, Julien and Everett, Richard and Singh, Satinder and Graepel, Thore and Bachrach, Yoram},
  year = {2020},
  file = {Full Text:/home/nemo/Zotero/storage/RKH36CBQ/Anthony et al. - 2020 - Learning to Play No-Press Diplomacy with Best Resp.pdf:application/pdf}
}
@article{paquette_no_2019,
  title = {No {Press} {Diplomacy}: {Modeling} {Multi}-{Agent} {Gameplay}},
  url = {http://arxiv.org/abs/1909.02128v2},
  journal = {arxiv:1909.02128},
  eprint = {1909.02128},
  eprinttype = {arXiv},
  author = {Paquette, Philip and Lu, Yuchen and Bocco, Steven and Smith, Max O. and Ortiz-Gagne, Satya and Kummerfeld, Jonathan K. and Singh, Satinder and Pineau, Joelle and Courville, Aaron},
  year = {2019},
  file = {Full Text:/home/nemo/Zotero/storage/YHUCJAG8/Paquette et al. - 2019 - No Press Diplomacy Modeling Multi-Agent Gameplay.pdf:application/pdf}
}
@article{gedda_monte_2018,
  title = {Monte {Carlo} {Methods} for the {Game} {Kingdomino}},
  url = {http://arxiv.org/abs/1807.04458v2},
  journal = {arxiv:1807.04458},
  eprint = {1807.04458},
  eprinttype = {arXiv},
  author = {Gedda, Magnus and Lagerkvist, Mikael Z. and Butler, Martin},
  year = {2018},
  file = {Full Text:/home/nemo/Zotero/storage/T2BSPBPV/Gedda et al. - 2018 - Monte Carlo Methods for the Game Kingdomino.pdf:application/pdf}
}
@article{nguyen_np-completeness_2019,
  title = {{NP}-completeness of the game {Kingdomino}},
  url = {http://arxiv.org/abs/1909.02849v3},
  journal = {arxiv:1909.02849},
  eprint = {1909.02849},
  eprinttype = {arXiv},
  author = {Nguyen, Viet-Ha and Perrot, Kevin and Vallet, Mathieu},
  year = {2019},
  file = {Full Text:/home/nemo/Zotero/storage/32L6ZKCA/Nguyen et al. - 2019 - NP-completeness of the game Kingdomino.pdf:application/pdf}
}
@article{lerer_improving_2019,
  title = {Improving {Policies} via {Search} in {Cooperative} {Partially} {Observable} {Games}},
  url = {http://arxiv.org/abs/1912.02318v1},
  journal = {arxiv:1912.02318},
  eprint = {1912.02318},
  eprinttype = {arXiv},
  author = {Lerer, Adam and Hu, Hengyuan and Foerster, Jakob and Brown, Noam},
  year = {2019},
  file = {Full Text:/home/nemo/Zotero/storage/F2N99DK9/Lerer et al. - 2019 - Improving Policies via Search in Cooperative Parti.pdf:application/pdf}
}
@article{baffier_hanabi_2016,
  title = {Hanabi is {NP}-hard, {Even} for {Cheaters} who {Look} at {Their} {Cards}},
  url = {http://arxiv.org/abs/1603.01911v3},
  journal = {arxiv:1603.01911},
  eprint = {1603.01911},
  eprinttype = {arXiv},
  author = {Baffier, Jean-Francois and Chiu, Man-Kwun and Diez, Yago and Korman, Matias and Mitsou, Valia and Renssen, André van and Roeloffzen, Marcel and Uno, Yushi},
  year = {2016},
  file = {Full Text:/home/nemo/Zotero/storage/XMPLK7RJ/Baffier et al. - 2016 - Hanabi is NP-hard, Even for Cheaters who Look at T.pdf:application/pdf}
}
@article{canaan_generating_2020,
  title = {Generating and {Adapting} to {Diverse} {Ad}-{Hoc} {Cooperation} {Agents} in {Hanabi}},
  url = {http://arxiv.org/abs/2004.13710v2},
  journal = {arxiv:2004.13710},
  eprint = {2004.13710},
  eprinttype = {arXiv},
  author = {Canaan, Rodrigo and Gao, Xianbo and Togelius, Julian and Nealen, Andy and Menzel, Stefan},
  year = {2020},
  file = {Full Text:/home/nemo/Zotero/storage/PDZQXHYY/Canaan et al. - 2020 - Generating and Adapting to Diverse Ad-Hoc Cooperat.pdf:application/pdf}
}
@article{canaan_evaluating_2020,
  title = {Evaluating the {Rainbow} {DQN} {Agent} in {Hanabi} with {Unseen} {Partners}},
  url = {http://arxiv.org/abs/2004.13291v1},
  journal = {arxiv:2004.13291},
  eprint = {2004.13291},
  eprinttype = {arXiv},
  author = {Canaan, Rodrigo and Gao, Xianbo and Chung, Youjin and Togelius, Julian and Nealen, Andy and Menzel, Stefan},
  year = {2020},
  file = {Full Text:/home/nemo/Zotero/storage/DEVP82UJ/Canaan et al. - 2020 - Evaluating the Rainbow DQN Agent in Hanabi with Un.pdf:application/pdf}
}
@article{biderman_magic_2020,
  title = {Magic: the {Gathering} is as {Hard} as {Arithmetic}},
  url = {http://arxiv.org/abs/2003.05119v1},
  journal = {arxiv:2003.05119},
  eprint = {2003.05119},
  eprinttype = {arXiv},
  author = {Biderman, Stella},
  year = {2020},
  file = {Full Text:/home/nemo/Zotero/storage/N83MTIN9/Biderman - 2020 - Magic the Gathering is as Hard as Arithmetic.pdf:application/pdf}
}
@article{churchill_magic_2019,
  title = {Magic: {The} {Gathering} is {Turing} {Complete}},
  url = {http://arxiv.org/abs/1904.09828v2},
  journal = {arxiv:1904.09828},
  eprint = {1904.09828},
  eprinttype = {arXiv},
  author = {Churchill, Alex and Biderman, Stella and Herrick, Austin},
  year = {2019},
  file = {Full Text:/home/nemo/Zotero/storage/5NW5WTWK/Churchill et al. - 2019 - Magic The Gathering is Turing Complete.pdf:application/pdf}
}
@article{zilio_neural_2018,
  title = {Neural {Networks} {Models} for {Analyzing} {Magic}: the {Gathering} {Cards}},
  url = {http://arxiv.org/abs/1810.03744v1},
  journal = {arxiv:1810.03744},
  eprint = {1810.03744},
  eprinttype = {arXiv},
  author = {Zilio, Felipe and Prates, Marcelo},
  year = {2018},
  file = {Full Text:/home/nemo/Zotero/storage/VX32HLNF/Zilio et al. - 2018 - Neural Networks Models for Analyzing Magic the Ga.pdf:application/pdf}
}
@inproceedings{grichshenko_using_2020,
  title     = {Using {Tabu} {Search} {Algorithm} for {Map} {Generation} in the {Terra} {Mystica} {Tabletop} {Game}},
  booktitle = {Proceedings of the 2020 4th {International} {Conference} on {Intelligent} {Systems}, {Metaheuristics} \& {Swarm} {Intelligence}},
  publisher = {ACM},
  author    = {Grichshenko, Alexandr and Araújo, Luiz Jonatã Pires de and Gimaeva, Susanna and Brown, Joseph Alexander},
  month     = mar,
  year      = {2020},
  file      = {Submitted Version:/home/nemo/Zotero/storage/4LSZ3R5D/Grichshenko et al. - 2020 - Using Tabu Search Algorithm for Map Generation in .pdf:application/pdf}
}
@article{migdal_mathematical_2010,
  title = {A mathematical model of the {Mafia} game},
  url = {http://arxiv.org/abs/1009.1031v3},
  journal = {arxiv:1009.1031},
  eprint = {1009.1031},
  eprinttype = {arXiv},
  author = {Migdał, Piotr},
  year = {2010},
  file = {Full Text:/home/nemo/Zotero/storage/RCJ7EPW7/Migdał - 2010 - A mathematical model of the Mafia game.pdf:application/pdf}
}
@article{demaine_complexity_2010,
  title = {The complexity of {UNO}},
  url = {http://arxiv.org/abs/1003.2851v3},
  journal = {arxiv:1003.2851},
  eprint = {1003.2851},
  eprinttype = {arXiv},
  author = {Demaine, Erik D. and Demaine, Martin L. and Harvey, Nicholas J. A. and Uehara, Ryuhei and Uno, Takeaki and Uno, Yushi},
  year = {2010},
  file = {Full Text:/home/nemo/Zotero/storage/KNHHMQC3/Demaine et al. - 2010 - The complexity of UNO.pdf:application/pdf}
}
@article{almanza_trainyard_2016,
  title = {Trainyard is {NP}-{Hard}},
  url = {http://arxiv.org/abs/1603.00928v1},
  journal = {arxiv:1603.00928},
  eprint = {1603.00928},
  eprinttype = {arXiv},
  author = {Almanza, Matteo and Leucci, Stefano and Panconesi, Alessandro},
  year = {2016},
  file = {Full Text:/home/nemo/Zotero/storage/6XZDBHIF/Almanza et al. - 2016 - Trainyard is NP-Hard.pdf:application/pdf}
}
@article{langerman_threes_2015,
  title = {Threes!, {Fives}, 1024!, and 2048 are {Hard}},
  url = {http://arxiv.org/abs/1505.04274v1},
  journal = {arxiv:1505.04274},
  eprint = {1505.04274},
  eprinttype = {arXiv},
  author = {Langerman, Stefan and Uno, Yushi},
  year = {2015},
  file = {Full Text:/home/nemo/Zotero/storage/EKHK8LWW/Langerman and Uno - 2015 - Threes!, Fives, 1024!, and 2048 are Hard.pdf:application/pdf}
}
@article{eppstein_making_2018,
  title = {Making {Change} in 2048},
  url = {http://arxiv.org/abs/1804.07396v1},
  journal = {arxiv:1804.07396},
  eprint = {1804.07396},
  eprinttype = {arXiv},
  author = {Eppstein, David},
  year = {2018},
  file = {Full Text:/home/nemo/Zotero/storage/MTEUWS7P/Eppstein - 2018 - Making Change in 2048.pdf:application/pdf}
}
@article{das_analysis_2018,
  title = {Analysis of the {Game} "2048" and its {Generalization} in {Higher} {Dimensions}},
  url = {http://arxiv.org/abs/1804.07393v2},
  journal = {arxiv:1804.07393},
  eprint = {1804.07393},
  eprinttype = {arXiv},
  author = {Das, Madhuparna and Paul, Goutam},
  year = {2018},
  file = {Full Text:/home/nemo/Zotero/storage/IVPCDJKF/Das and Paul - 2018 - Analysis of the Game 2048 and its Generalization.pdf:application/pdf}
}
@article{yeh_multi-stage_2016,
  title = {Multi-{Stage} {Temporal} {Difference} {Learning} for 2048-like {Games}},
  url = {http://arxiv.org/abs/1606.07374v2},
  journal = {arxiv:1606.07374},
  eprint = {1606.07374},
  eprinttype = {arXiv},
  author = {Yeh, Kun-Hao and Wu, I.-Chen and Hsueh, Chu-Hsuan and Chang, Chia-Chuan and Liang, Chao-Chin and Chiang, Han},
  year = {2016},
  file = {Full Text:/home/nemo/Zotero/storage/XYA7M7R4/Yeh et al. - 2016 - Multi-Stage Temporal Difference Learning for 2048-.pdf:application/pdf}
}
@article{mehta_2048_2014,
  title = {2048 is ({PSPACE}) {Hard}, but {Sometimes} {Easy}},
  url = {http://arxiv.org/abs/1408.6315v1},
  journal = {arxiv:1408.6315},
  eprint = {1408.6315},
  eprinttype = {arXiv},
  author = {Mehta, Rahul},
  year = {2014},
  file = {Full Text:/home/nemo/Zotero/storage/TDMX7RFI/Mehta - 2014 - 2048 is (PSPACE) Hard, but Sometimes Easy.pdf:application/pdf}
}
@misc{noauthor_settlers_nodate,
  title = {Settlers of {Catan} bot trained using reinforcement learning},
  url   = {https://jonzia.github.io/Catan/}
}
@inproceedings{guhe_trading_2012,
  title = {Trading in a multiplayer board game: {Towards} an analysis of non-cooperative dialogue},
  volume = {34},
  booktitle = {Proceedings of the {Annual} {Meeting} of the {Cognitive} {Science} {Society}},
  author = {Guhe, Markus and Lascarides, Alex},
  year = {2012},
  file = {Guhe and Lascarides - 2012 - Trading in a multiplayer board game Towards an an.pdf:/home/nemo/Zotero/storage/AT8UHTXM/Guhe and Lascarides - 2012 - Trading in a multiplayer board game Towards an an.pdf:application/pdf}
}
@article{noauthor_pomcp_nodate,
  title = {{POMCP} with {Human} {Preferencesin} {Settlers} of {Catan}},
  file = {POMCP with Human Preferencesin Settlers of Catan.pdf:/home/nemo/Zotero/storage/CA62SLVK/POMCP with Human Preferencesin Settlers of Catan.pdf:application/pdf;An Intelligent Artificial Player for the Game of R.pdf:/home/nemo/Zotero/storage/89MUCUE7/An Intelligent Artificial Player for the Game of R.pdf:application/pdf},
  internal-note = {the two repeated file fields were merged into one (BibTeX silently drops repeated fields); the second attachment (an artificial Risk player) appears unrelated to this entry --- rehome it; author/venue/year missing}
}
@article{noauthor_risky_nodate,
  title = {{RISKy} {Business}: {An} {In}-{Depth} {Look} at the {Game} {RISK}},
  file  = {RISKy Business An In-Depth Look at the Game RISK.pdf:/home/nemo/Zotero/storage/PT8CWUJ5/RISKy Business An In-Depth Look at the Game RISK.pdf:application/pdf}
}
@article{noauthor_risk_nodate,
  title = {{RISK} {Board} {Game} - {Battle} {Outcome} {Analysis}},
  internal-note = {corrupted export: a second title plus url/abstract/file from the Patchwork arXiv paper had been fused into this entry (split out below); Unicode hyphen in the title normalised to ASCII; author/venue/year missing}
}
@article{lagerkvist_state_2020,
  title = {State {Representation} and {Polyomino} {Placement} for the {Game} {Patchwork}},
  url = {http://arxiv.org/abs/2001.04233},
  eprint = {2001.04233},
  eprinttype = {arXiv},
  author = {Lagerkvist, Mikael Zayenz},
  year = {2020},
  abstract = {Modern board games are a rich source of entertainment for many people, but also contain interesting and challenging structures for game playing research and implementing game playing agents. This paper studies the game Patchwork, a two player strategy game using polyomino tile drafting and placement. The core polyomino placement mechanic is implemented in a constraint model using regular constraints, extending and improving the model in (Lagerkvist, Pesant, 2008) with: explicit rotation handling; optional placements; and new constraints for resource usage. Crucial for implementing good game playing agents is to have great heuristics for guiding the search when faced with large branching factors. This paper divides placing tiles into two parts: a policy used for placing parts and an evaluation used to select among different placements. Policies are designed based on classical packing literature as well as common standard constraint programming heuristics. For evaluation, global propagation guided regret is introduced, choosing placements based on not ruling out later placements. Extensive evaluations are performed, showing the importance of using a good evaluation and that the proposed global propagation guided regret is a very effective guide.},
  file = {Full Text:/home/nemo/Zotero/storage/JVLQG3BV/State Representation and Polyomino Placement for t.pdf:application/pdf},
  internal-note = {author/year inferred from the companion Nmbr9 entry (adjacent arXiv identifiers 2001.04233/2001.04238) --- verify}
}
@article{lagerkvist_nmbr9_2020,
  title = {Nmbr9 as a {Constraint} {Programming} {Challenge}},
  url = {http://arxiv.org/abs/2001.04238},
  eprint = {2001.04238},
  eprinttype = {arXiv},
  abstract = {Modern board games are a rich source of interesting and new challenges for combinatorial problems. The game Nmbr9 is a solitaire style puzzle game using polyominoes. The rules of the game are simple to explain, but modelling the game effectively using constraint programming is hard. This abstract presents the game, contributes new generalized variants of the game suitable for benchmarking and testing, and describes a model for the presented variants. The question of the top possible score in the standard game is an open challenge.},
  internal-note = {author field missing --- presumably Lagerkvist per the citation key; a second (Hanabi-related) abstract and file had been fused into this entry and are split out below}
}
@misc{eger_browser_2017,
  title = {A {Browser}-based {Interface} for the {Exploration} and {Evaluation} of {Hanabi} {AIs}},
  author = {Eger, Markus and Martens, Chris},
  year = {2017},
  abstract = {A unique behavior of humans is modifying one’s unobservable behavior based on the reaction of others for cooperation. We used a card game called Hanabi as an evaluation task of imitating human reflective intelligence with artificial intelligence. Hanabi is a cooperative card game with incomplete information. A player cooperates with an opponent in building several card sets constructed with the same color and ordered numbers. However, like a blind man's bluff, each player sees the cards of all other players except his/her own. Also, communication between players is restricted to information about the same numbers and colors, and the player is required to read his/his opponent's intention with the opponent's hand, estimate his/her cards with incomplete information, and play one of them for building a set. We compared human play with several simulated strategies. The results indicate that the strategy with feedbacks from simulated opponent's viewpoints achieves more score than other strategies.},
  file = {Eger and Martens - 2017 - A Browser-based Interface for the Exploration and .pdf:/home/nemo/Zotero/storage/RE7PCTMZ/Eger and Martens - 2017 - A Browser-based Interface for the Exploration and .pdf:application/pdf},
  internal-note = {fields recovered from a corrupted merge; title/authors/year inferred from the attached PDF filename, and the abstract text reads like a different Hanabi study --- verify which work each field belongs to}
}
@article{gottwald_i_nodate,
  title    = {I see what you see: {Integrating} eye tracking into {Hanabi} playing agents},
  abstract = {Humans’ eye movements convey a lot of information about their intentions, often unconsciously. Intelligent agents that cooperate with humans in various domains can benefit from interpreting this information. This paper contains a preliminary look at how eye tracking could be useful for agents that play the cooperative card game Hanabi with human players. We outline several situations in which an AI agent can utilize gaze information, and present an outlook on how we plan to integrate this with reimplementations of contemporary Hanabi agents.},
  language = {en},
  author   = {Gottwald, Eva Tallula and Eger, Markus and Martens, Chris},
  file     = {Gottwald et al. - I see what you see Integrating eye tracking into .pdf:/home/nemo/Zotero/storage/5STNIF33/Gottwald et al. - I see what you see Integrating eye tracking into .pdf:application/pdf}
}
@misc{noauthor_state_nodate-2,
title = {State of the art {Hanabi} bots + simulation framework in rust},