@inproceedings{28ced2dfacf54a57bdd43ccc6585330e,
title = "Generative Adversarial Source Separation",
abstract = "Generative source separation methods such as non-negative matrix factorization (NMF) or auto-encoders, rely on the assumption of an output probability density. Generative Adversarial Networks (GANs) can learn data distributions without needing a parametric assumption on the output density. We show on a speech source separation experiment that, a multilayer perceptron trained with a Wasserstein-GAN formulation outperforms NMF, auto-encoders trained with maximum likelihood, and variational auto-encoders in terms of source to distortion ratio.",
keywords = "Generative Adversarial Networks, Generative Models, Source Separation",
author = "Subakan, {Y. Cem} and Paris Smaragdis",
note = "Funding Information: This work is supported by NSF grant #1453104. Publisher Copyright: {\textcopyright} 2018 IEEE.; 2018 IEEE International Conference on Acoustics, Speech, and Signal Processing, ICASSP 2018 ; Conference date: 15-04-2018 Through 20-04-2018",
year = "2018",
month = sep,
day = "10",
doi = "10.1109/ICASSP.2018.8461671",
language = "English (US)",
isbn = "9781538646588",
series = "ICASSP, IEEE International Conference on Acoustics, Speech and Signal Processing - Proceedings",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "26--30",
booktitle = "2018 IEEE International Conference on Acoustics, Speech, and Signal Processing, ICASSP 2018 - Proceedings",
address = "United States",
}
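
% Note on the abstract above: the "Wasserstein-GAN formulation" it refers to is, in its standard form
% (Arjovsky et al., 2017), the minimax objective sketched below. This is a generic illustration only,
% not the paper's exact separator/critic setup; the symbols (generator $G$, 1-Lipschitz critic $D$,
% data distribution $P_r$, input/noise distribution $P_z$) are assumptions added here for clarity.
%
% \[
%   \min_{G} \; \max_{D \,\in\, \mathcal{D}_{1\text{-Lip}}} \;
%   \mathbb{E}_{x \sim P_r}\left[ D(x) \right] \;-\; \mathbb{E}_{z \sim P_z}\left[ D(G(z)) \right]
% \]
%
% In the source separation setting described in the abstract, $G$ would be the multilayer perceptron
% producing source estimates and $D$ the critic scoring how closely those estimates match the true
% source distribution; the evaluation metric mentioned is the source-to-distortion ratio (SDR).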