Add autoencoders section in State of the Art
@@ -810,3 +810,207 @@
  edition = {Third},
  pages = {209, 260},
}

@article{ABIODUN2018e00938,
  title = {State-of-the-art in artificial neural network applications: A survey},
  journal = {Heliyon},
  volume = 4,
  number = 11,
  pages = {e00938},
  year = 2018,
  issn = {2405-8440},
  doi = {10.1016/j.heliyon.2018.e00938},
  url = {https://www.sciencedirect.com/science/article/pii/S2405844018332067},
  author = {Oludare Isaac Abiodun and Aman Jantan and Abiodun Esther Omolara and
            Kemi Victoria Dada and Nachaat AbdElatif Mohamed and Humaira Arshad},
  keywords = {Computer science},
  abstract = {This is a survey of neural network applications in the real-world
              scenario. It provides a taxonomy of artificial neural networks (ANNs)
              and furnish the reader with knowledge of current and emerging trends
              in ANN applications research and area of focus for researchers.
              Additionally, the study presents ANN application challenges,
              contributions, compare performances and critiques methods. The study
              covers many applications of ANN techniques in various disciplines
              which include computing, science, engineering, medicine,
              environmental, agriculture, mining, technology, climate, business,
              arts, and nanotechnology, etc. The study assesses ANN contributions,
              compare performances and critiques methods. The study found that
              neural-network models such as feedforward and feedback propagation
              artificial neural networks are performing better in its application
              to human problems. Therefore, we proposed feedforward and feedback
              propagation ANN models for research focus based on data analysis
              factors like accuracy, processing speed, latency, fault tolerance,
              volume, scalability, convergence, and performance. Moreover, we
              recommend that instead of applying a single method, future research
              can focus on combining ANN models into one network-wide application.}
}

@article{LIU201711,
  title = {A survey of deep neural network architectures and their applications},
  journal = {Neurocomputing},
  volume = 234,
  pages = {11--26},
  year = 2017,
  issn = {0925-2312},
  doi = {10.1016/j.neucom.2016.12.038},
  url = {https://www.sciencedirect.com/science/article/pii/S0925231216315533},
  author = {Weibo Liu and Zidong Wang and Xiaohui Liu and Nianyin Zeng and
            Yurong Liu and Fuad E. Alsaadi},
  keywords = {Autoencoder, Convolutional neural network, Deep learning,
              Deep belief network, Restricted Boltzmann machine},
  abstract = {Since the proposal of a fast learning algorithm for deep belief
              networks in 2006, the deep learning techniques have drawn
              ever-increasing research interests because of their inherent
              capability of overcoming the drawback of traditional algorithms
              dependent on hand-designed features. Deep learning approaches have
              also been found to be suitable for big data analysis with successful
              applications to computer vision, pattern recognition, speech
              recognition, natural language processing, and recommendation
              systems. In this paper, we discuss some widely-used deep learning
              architectures and their practical applications. An up-to-date
              overview is provided on four deep learning architectures, namely,
              autoencoder, convolutional neural network, deep belief network, and
              restricted Boltzmann machine. Different types of deep neural
              networks are surveyed and recent progresses are summarized.
              Applications of deep learning techniques on some selected areas
              (speech recognition, pattern recognition and computer vision) are
              highlighted. A list of future research topics are finally given
              with clear justifications.}
}

@misc{chervinskii_2015,
  title = {Autoencoder structure},
  url = {https://commons.wikimedia.org/wiki/File:Autoencoder_structure.png},
  howpublished = {Wikimedia Commons},
  author = {Chervinskii},
  year = 2015,
  month = {Dec}
}

@book{Goodfellow-et-al-2016,
  title = {Deep Learning},
  author = {Ian Goodfellow and Yoshua Bengio and Aaron Courville},
  publisher = {MIT Press},
  note = {\url{http://www.deeplearningbook.org}},
  year = 2016
}

@article{Lewis_2020,
  author = {Lewis, Mike and Liu, Yinhan and Goyal, Naman and Ghazvininejad,
            Marjan and Mohamed, Abdelrahman and Levy, Omer and Stoyanov, Veselin
            and Zettlemoyer, Luke},
  title = {BART: Denoising Sequence-to-Sequence Pre-training for Natural Language
           Generation, Translation, and Comprehension},
  journal = {Proceedings of the 58th Annual Meeting of the Association for
             Computational Linguistics},
  year = 2020,
  doi = {10.18653/v1/2020.acl-main.703},
  url = {http://dx.doi.org/10.18653/v1/2020.acl-main.703},
  publisher = {Association for Computational Linguistics}
}

@article{bigdeli17_image_restor_using_autoen_prior,
  author = {Bigdeli, Siavash Arjomand and Zwicker, Matthias},
  title = {Image Restoration Using Autoencoding Priors},
  journal = {CoRR},
  year = 2017,
  url = {http://arxiv.org/abs/1703.09964v1},
  abstract = {We propose to leverage denoising autoencoder networks as priors to
              address image restoration problems. We build on the key observation
              that the output of an optimal denoising autoencoder is a local mean
              of the true data density, and the autoencoder error (the difference
              between the output and input of the trained autoencoder) is a mean
              shift vector. We use the magnitude of this mean shift vector, that
              is, the distance to the local mean, as the negative log likelihood
              of our natural image prior. For image restoration, we maximize the
              likelihood using gradient descent by backpropagating the autoencoder
              error. A key advantage of our approach is that we do not need to
              train separate networks for different image restoration tasks, such
              as non-blind deconvolution with different kernels, or
              super-resolution at different magnification factors. We demonstrate
              state of the art results for non-blind deconvolution and
              super-resolution using the same autoencoding prior.},
  archivePrefix = {arXiv},
  eprint = {1703.09964},
  primaryClass = {cs.CV}
}

@article{makhzani15_adver_autoen,
  author = {Makhzani, Alireza and Shlens, Jonathon and Jaitly, Navdeep and
            Goodfellow, Ian and Frey, Brendan},
  title = {Adversarial Autoencoders},
  journal = {CoRR},
  year = 2015,
  url = {http://arxiv.org/abs/1511.05644v2},
  abstract = {In this paper, we propose the "adversarial autoencoder" (AAE), which
              is a probabilistic autoencoder that uses the recently proposed
              generative adversarial networks (GAN) to perform variational
              inference by matching the aggregated posterior of the hidden code
              vector of the autoencoder with an arbitrary prior distribution.
              Matching the aggregated posterior to the prior ensures that
              generating from any part of prior space results in meaningful
              samples. As a result, the decoder of the adversarial autoencoder
              learns a deep generative model that maps the imposed prior to the
              data distribution. We show how the adversarial autoencoder can be
              used in applications such as semi-supervised classification,
              disentangling style and content of images, unsupervised clustering,
              dimensionality reduction and data visualization. We performed
              experiments on MNIST, Street View House Numbers and Toronto Face
              datasets and show that adversarial autoencoders achieve competitive
              results in generative modeling and semi-supervised classification
              tasks.},
  archivePrefix = {arXiv},
  eprint = {1511.05644v2},
  primaryClass = {cs.LG}
}

@article{Yoo_2020,
  author = {Yoo, Jaeyoung and Lee, Hojun and Kwak, Nojun},
  title = {Unpriortized Autoencoder For Image Generation},
  journal = {2020 IEEE International Conference on Image Processing (ICIP)},
  year = 2020,
  month = {Oct},
  doi = {10.1109/icip40778.2020.9191173},
  url = {http://dx.doi.org/10.1109/ICIP40778.2020.9191173},
  isbn = {9781728163956},
  publisher = {IEEE}
}

@article{kaiser18_discr_autoen_sequen_model,
  author = {Kaiser, Łukasz and Bengio, Samy},
  title = {Discrete Autoencoders for Sequence Models},
  journal = {CoRR},
  year = 2018,
  url = {http://arxiv.org/abs/1801.09797v1},
  abstract = {Recurrent models for sequences have been recently successful at
              many tasks, especially for language modeling and machine
              translation. Nevertheless, it remains challenging to extract good
              representations from these models. For instance, even though
              language has a clear hierarchical structure going from characters
              through words to sentences, it is not apparent in current language
              models. We propose to improve the representation in sequence models
              by augmenting current approaches with an autoencoder that is forced
              to compress the sequence through an intermediate discrete latent
              space. In order to propagate gradients through this discrete
              representation we introduce an improved semantic hashing technique.
              We show that this technique performs well on a newly proposed
              quantitative efficiency measure. We also analyze latent codes
              produced by the model showing how they correspond to words and
              phrases. Finally, we present an application of the
              autoencoder-augmented model to generating diverse translations.},
  archivePrefix = {arXiv},
  eprint = {1801.09797v1},
  primaryClass = {cs.LG}
}

BIN  assets/figures/autoencoder.png  (new file)
Binary file not shown. (After: 48 KiB)