@inproceedings{yin17,
  author    = {Yin, Hang and Melo, Francisco S. and Billard, Aude and Paiva, Ana},
  title     = {Associate Latent Encodings in Learning from Demonstrations},
  booktitle = {Proceedings of the Thirty-First {AAAI} Conference on Artificial Intelligence ({AAAI}-17), San Francisco, California, USA},
  year      = {2017},
  keywords  = {Reinforcement Learning;Neural Computation;Miscellaneous},
  abstract  = {We contribute a learning from demonstration approach for robots to acquire skills from multi-modal high-dimensional data. Both latent representations and associations of different modalities are proposed to be jointly learned through an adapted variational auto-encoder. The implementation and results are demonstrated in a robotic handwriting scenario, where the visual sensory input and the arm joint writing motion are learned and coupled. We show the latent representations successfully construct a task manifold for the observed sensor modalities. Moreover, the learned associations can be exploited to directly synthesize arm joint handwriting motion from an image input in an end-to-end manner. The advantages of learning associative latent encodings are further highlighted with the examples of inferring upon incomplete input images. A comparison with alternative methods demonstrates the superiority of the present approach in these challenging tasks.},
}