Related Publications
L. Szymanski, B. McCane and C. Atkinson. Conceptual complexity of neural networks. Neurocomputing, 469:52-64, 2022.
@article{Szymanski.etal:2021,
title = {Conceptual complexity of neural networks},
journal = {Neurocomputing},
volume = {469},
pages = {52--64},
year = {2022},
issn = {0925-2312},
doi = {10.1016/j.neucom.2021.10.063},
url = {https://doi.org/10.1016/j.neucom.2021.10.063},
author = {Lech Szymanski and Brendan McCane and Craig Atkinson},
keywords = {deep learning, learning theory, complexity measures},
}
L. Szymanski, B. McCane and M. Albert. The effect of the choice of neural network depth and breadth on the size of its hypothesis space. CoRR, abs/1806.02460, 2018.
@article{Szymanski.etal2018a,
author = {Lech Szymanski and Brendan McCane and Michael Albert},
title = {The effect of the choice of neural network depth and breadth on the
size of its hypothesis space},
journal = {CoRR},
volume = {abs/1806.02460},
year = {2018},
url = {http://arxiv.org/abs/1806.02460},
archivePrefix = {arXiv},
eprint = {1806.02460},
timestamp = {Mon, 13 Aug 2018 16:47:32 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1806-02460},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
L. Szymanski, B. McCane, W. Gao and Z. Zhou. Effects of the optimisation of the margin distribution on generalisation in deep architectures. CoRR, abs/1704.05646, 2017.
@article{Szymanski.etal:2017b,
author = {Lech Szymanski and
Brendan McCane and
Wei Gao and
Zhi{-}Hua Zhou},
title = {Effects of the optimisation of the margin distribution on generalisation
in deep architectures},
journal = {CoRR},
volume = {abs/1704.05646},
year = {2017},
url = {http://arxiv.org/abs/1704.05646},
archivePrefix = {arXiv},
eprint = {1704.05646},
timestamp = {Mon, 13 Aug 2018 16:47:28 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/SzymanskiMGZ17},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
B. McCane and L. Szymanski. Efficiency of deep networks for radially symmetric functions. Neurocomputing, 313:119-124, 2018.
@article{mccane.etal2017a,
author = {Brendan McCane and Lech Szymanski},
title = {Efficiency of deep networks for radially symmetric functions},
journal = {Neurocomputing},
volume = {313},
pages = {119--124},
year = {2018},
url = {https://doi.org/10.1016/j.neucom.2018.06.003}
}
B. McCane and L. Szymanski. Deep networks are efficient for circular manifolds. In 2016 23rd International Conference on Pattern Recognition (ICPR), pp. 3464-3469, 2016.
@inproceedings{McCane.etal:2016a,
author={B. McCane and L. Szymanski},
booktitle={2016 23rd International Conference on Pattern Recognition (ICPR)},
title={Deep networks are efficient for circular manifolds},
year={2016},
pages={3464--3469},
url={https://doi.org/10.1109/ICPR.2016.7900170}
}
L. Szymanski and B. McCane. Deep Networks are Effective Encoders of Periodicity. IEEE Transactions on Neural Networks and Learning Systems, 25(10):1816-1827, 2014.
@article{Szymanski.etal:2014,
author={L. Szymanski and B. McCane},
journal={IEEE Transactions on Neural Networks and Learning Systems},
title={Deep Networks are Effective Encoders of Periodicity},
year={2014},
volume={25},
number={10},
pages={1816--1827},
doi={10.1109/TNNLS.2013.2296046},
url={https://doi.org/10.1109/TNNLS.2013.2296046}
}
L. Szymanski and B. McCane. Deep, super-narrow neural network is a universal classifier. In The 2012 International Joint Conference on Neural Networks (IJCNN), pp. 1-8, 2012.
@inproceedings{Szymanski.etal:2012a,
author={L. Szymanski and B. McCane},
booktitle={The 2012 International Joint Conference on Neural Networks (IJCNN)},
title={Deep, super-narrow neural network is a universal classifier},
year={2012},
pages={1--8},
url={https://doi.org/10.1109/IJCNN.2012.6252513}
}
L. Szymanski and B. McCane. Push-pull separability objective for supervised layer-wise training of neural networks. In The 2012 International Joint Conference on Neural Networks (IJCNN), pp. 1-8, 2012.
@inproceedings{Szymanski.etal:2012b,
author={L. Szymanski and B. McCane},
booktitle={The 2012 International Joint Conference on Neural Networks (IJCNN)},
title={Push-pull separability objective for supervised layer-wise training of neural networks},
year={2012},
pages={1--8},
url = {https://doi.org/10.1109/IJCNN.2012.6252366}
}