@inproceedings{witwicki12,
  author    = {Witwicki, Stefan and Chen, Inn-Tung and Durfee, Edmund and Singh, Satinder},
  title     = {Planning and Evaluating Multiagent Influences Under Reward Uncertainty (Extended Abstract)},
  booktitle = {Proceedings of the Eleventh International Conference on Autonomous Agents and Multiagent Systems ({AAMAS})},
  year      = {2012},
  month     = jun,
  address   = {Valencia, Spain},
  keywords  = {Multiagent Planning, Transition-Decoupled {POMDP}, Model Uncertainty, Bayesian Rewards, Influence Abstraction, Commitments},
  abstract  = {Forming commitments about abstract influences that agents can exert on one another has shown promise in improving the tractability of multiagent coordination under uncertainty. We now extend this approach to domains with meta-level reward-model uncertainty. Intuitively, an agent may actually improve collective performance by forming a weaker commitment that allows more latitude to adapt its policy as it refines its reward model. To account for reward uncertainty as such, we introduce and contrast three new techniques.},
}