{
"metadata": [
{
"Title": "A robust music genre classification approach for global and regional music datasets evaluation",
"Authors": "Jefferson Martins de Sousa, Eanes Torres Pereira, Luciana Ribeiro Veloso",
"Date of publication": "2016-10-01",
"Source": "https://www.researchgate.net/publication/309396261_A_robust_music_genre_classification_approach_for_global_and_regional_music_datasets_evaluation"
},
{
"Title": "",
"Authors": "",
"Date of publication": "",
"Source": ""
}
],
"people": [
{
"Jefferson Martins de Sousa": 1,
"Eanes Torres Pereira": 1,
"Luciana Ribeiro Veloso": 1
},
{
"Jefferson Martins de Sousa": "JMdS",
"Eanes Torres Pereira": "ETP",
"Luciana Ribeiro Veloso": "LRV"
}
],
"places": [
{
"Brazil": 2
},
{
"Brazil": "BR"
},
{
"Brazil": "https://en.wikipedia.org/wiki/Brazil"
}
],
"keywords": [
{
"genre classification": 10,
"music information retrieval (mir)": 8,
"music emotion recognition (mer)": 4,
"gtzan dataset": 15,
"brazilian music": 10
},
{
"genre classification": "genreClassification",
"music information retrieval (mir)": "musicInformationRetrieval",
"music emotion recognition (mer)": "musicEmotionRecognition",
"gtzan dataset": "GTZANdataset",
"brazilian music": "brazilianMusic"
}
],
"references": [
{
"1. J. Salamon, B. Rocha, and E. Gómez, “Musical genre classification using melody features extracted from polyphonic music signals,” in IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Kyoto, Japan, 25/03/2012 2012..": 1,
"2. Y. Panagakis, C. Kotropoulos, D. O. Informatics, and G. R. Arce, “Music genre classification using locality preserving nonnegative tensor factorization and sparse representations,” in In 10th International Society for Music Information Retrieval Conference (ISMIR), 2009.": 1,
"3. B. L. Sturm, “The gtzan dataset: Its contents, its faults, their effects on evaluation, and its future use,” arXiv preprint arXiv:1306.1461, 2013.": 1,
"4. ——, “The state of the art ten years after a state of the art: Future research in music information retrieval,” Journal of New Music Research, vol. 43, no. 2, pp. 147–172, 2014.": 1,
"5. Z. Zeng, S. Zhang, H. Li, W. Liang, and H. Zheng, “A novel approach to musical genre classification using probabilistic latent semantic analysis model,” in Multimedia and Expo, 2009. ICME 2009. IEEE International Conference on. IEEE, 2009, pp. 486–489.": 1,
"6. T. Hofmann, “Unsupervised learning by probabilistic latent semantic analysis,” Machine learning, vol. 42, no. 1-2, pp. 177–196, 2001.": 1,
"7. P.-A. Manzagol, T. Bertin-Mahieux, D. Eck et al., “On the use of sparse time relative auditory codes for music.” in ISMIR, 2008, pp. 603–608.": 1,
"8. A. Holzapfel and Y. Stylianou, “Musical genre classification using nonnegative matrix factorization-based features,” Audio, Speech, and Language Processing, IEEE Transactions on, vol. 16, no. 2, pp. 424– 434, 2008.": 1,
"9. ——, “A statistical approach to musical genre classification using non-negative matrix factorization,” in Acoustics, Speech and Signal Processing, 2007. ICASSP 2007. IEEE International Conference on, vol. 2. IEEE, 2007, pp. II–693.": 1,
"10. C. Laurier, Automatic Classification of Musical Mood by Content Based Analysis. Universitat Pompeu Fabra, 2011.": 1,
"11. R. Panda and R. P. Paiva, “Mirex 2012: Mood classification tasks submission,” Machine Learning, vol. 53, no. 1-2, pp. 23–69, 2003.": 1,
"12. G. Tzanetakis and P. Cook, “Musical genre classification of audio signals,” Speech and Audio Processing, IEEE transactions on, vol. 10, no. 5, pp. 293–302, 2002.": 1,
"13. O. Lartillot and P. Toiviainen, “A matlab toolbox for musical feature extraction from audio,” in International Conference on Digital Audio Effects, 2007, pp. 237–244.": 1,
"14. D. Cabrera et al., “Psysound: A computer program for psychoacoustical analysis,” in Proceedings of the Australian Acoustical Society Conference, vol. 24, 1999, pp. 47–54.": 1,
"15. Y.-H. Yang, Y.-C. Lin, Y.-F. Su, and H. H. Chen, “A regression approach to music emotion recognition,” Audio, Speech, and Language Processing, IEEE Transactions on, vol. 16, no. 2, pp. 448–457, 2008.": 1,
"16. T. Li and M. Ogihara, “Content-based music similarity search and emotion detection,” in Acoustics, Speech, and Signal Processing, 2004. Proceedings.(ICASSP’04). IEEE International Conference on, vol. 5. IEEE, 2004, pp. V–705.": 1,
"17. G. Tzanetakis and P. Cook, “Musical genre classification of audio signals,” Speech and Audio Processing, IEEE Transactions on, vol. 10, no. 5, pp. 293–302, Jul 2002.": 1,
"18. J. Bergstra, N. Casagrande, D. Erhan, D. Eck, and B. Kégl, “Aggregate features and adaboost for music classification,” Machine learning, vol. 65, no. 2-3, pp. 473–484, 2006.": 1,
"19. C. N. Silla Jr, A. L. Koerich, and C. A. Kaestner, “A feature selection approach for automatic music genre classification,” International Journal of Semantic Computing, vol. 3, no. 02, pp. 183–208, 2009.": 1,
"20. Z. Fu, G. Lu, K. M. Ting, and D. Zhang, “A survey of audio-based music classification and annotation,” IEEE Transactions on Multimedia, vol. 13, no. 2, pp. 303–319, 2011.": 1,
"21. T. Li and M. Ogihara, “Music genre classification with taxonomy,” in Acoustics, Speech, and Signal Processing, 2005. Proceedings. (ICASSP ’05). IEEE International Conference on, vol. 5, March 2005, pp. v/197– v/200 Vol. 5.": 1,
"22. E. Benetos and C. Kotropoulos, “A tensor-based approach for automatic music genre classification,” in Signal Processing Conference, 2008 16th European. IEEE, 2008, pp. 1–4": 1,
"23. H. Srinivasan and M. Kankanhalli, “Harmonicity and dynamics-based features for audio,” in Acoustics, Speech, and Signal Processing, 2004. Proceedings.(ICASSP’04). IEEE International Conference on, vol. 4. IEEE, 2004, pp. iv–321.": 1,
"24. C. N. Silla Jr, A. L. Koerich, and C. A. Kaestner, “The latin music database.” in ISMIR, 2008, pp. 451–456.": 1
},
{
"1. J. Salamon, B. Rocha, and E. Gómez, “Musical genre classification using melody features extracted from polyphonic music signals,” in IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Kyoto, Japan, 25/03/2012 2012..": "b01",
"2. Y. Panagakis, C. Kotropoulos, D. O. Informatics, and G. R. Arce, “Music genre classification using locality preserving nonnegative tensor factorization and sparse representations,” in In 10th International Society for Music Information Retrieval Conference (ISMIR), 2009.": "b02",
"3. B. L. Sturm, “The gtzan dataset: Its contents, its faults, their effects on evaluation, and its future use,” arXiv preprint arXiv:1306.1461, 2013.": "b03",
"4. ——, “The state of the art ten years after a state of the art: Future research in music information retrieval,” Journal of New Music Research, vol. 43, no. 2, pp. 147–172, 2014.": "b04",
"5. Z. Zeng, S. Zhang, H. Li, W. Liang, and H. Zheng, “A novel approach to musical genre classification using probabilistic latent semantic analysis model,” in Multimedia and Expo, 2009. ICME 2009. IEEE International Conference on. IEEE, 2009, pp. 486–489.": "b05",
"6. T. Hofmann, “Unsupervised learning by probabilistic latent semantic analysis,” Machine learning, vol. 42, no. 1-2, pp. 177–196, 2001.": "b06",
"7. P.-A. Manzagol, T. Bertin-Mahieux, D. Eck et al., “On the use of sparse time relative auditory codes for music.” in ISMIR, 2008, pp. 603–608.": "b07",
"8. A. Holzapfel and Y. Stylianou, “Musical genre classification using nonnegative matrix factorization-based features,” Audio, Speech, and Language Processing, IEEE Transactions on, vol. 16, no. 2, pp. 424– 434, 2008.": "b08",
"9. ——, “A statistical approach to musical genre classification using non-negative matrix factorization,” in Acoustics, Speech and Signal Processing, 2007. ICASSP 2007. IEEE International Conference on, vol. 2. IEEE, 2007, pp. II–693.": "b09",
"10. C. Laurier, Automatic Classification of Musical Mood by Content Based Analysis. Universitat Pompeu Fabra, 2011.": "b10",
"11. R. Panda and R. P. Paiva, “Mirex 2012: Mood classification tasks submission,” Machine Learning, vol. 53, no. 1-2, pp. 23–69, 2003.": "b11",
"12. G. Tzanetakis and P. Cook, “Musical genre classification of audio signals,” Speech and Audio Processing, IEEE transactions on, vol. 10, no. 5, pp. 293–302, 2002.": "b12",
"13. O. Lartillot and P. Toiviainen, “A matlab toolbox for musical feature extraction from audio,” in International Conference on Digital Audio Effects, 2007, pp. 237–244.": "b13",
"14. D. Cabrera et al., “Psysound: A computer program for psychoacoustical analysis,” in Proceedings of the Australian Acoustical Society Conference, vol. 24, 1999, pp. 47–54.": "b14",
"15. Y.-H. Yang, Y.-C. Lin, Y.-F. Su, and H. H. Chen, “A regression approach to music emotion recognition,” Audio, Speech, and Language Processing, IEEE Transactions on, vol. 16, no. 2, pp. 448–457, 2008.": "b15",
"16. T. Li and M. Ogihara, “Content-based music similarity search and emotion detection,” in Acoustics, Speech, and Signal Processing, 2004. Proceedings.(ICASSP’04). IEEE International Conference on, vol. 5. IEEE, 2004, pp. V–705.": "b16",
"17. G. Tzanetakis and P. Cook, “Musical genre classification of audio signals,” Speech and Audio Processing, IEEE Transactions on, vol. 10, no. 5, pp. 293–302, Jul 2002.": "b17",
"18. J. Bergstra, N. Casagrande, D. Erhan, D. Eck, and B. Kégl, “Aggregate features and adaboost for music classification,” Machine learning, vol. 65, no. 2-3, pp. 473–484, 2006.": "b18",
"19. C. N. Silla Jr, A. L. Koerich, and C. A. Kaestner, “A feature selection approach for automatic music genre classification,” International Journal of Semantic Computing, vol. 3, no. 02, pp. 183–208, 2009.": "b19",
"20. Z. Fu, G. Lu, K. M. Ting, and D. Zhang, “A survey of audio-based music classification and annotation,” IEEE Transactions on Multimedia, vol. 13, no. 2, pp. 303–319, 2011.": "b20",
"21. T. Li and M. Ogihara, “Music genre classification with taxonomy,” in Acoustics, Speech, and Signal Processing, 2005. Proceedings. (ICASSP ’05). IEEE International Conference on, vol. 5, March 2005, pp. v/197– v/200 Vol. 5.": "b21",
"22. E. Benetos and C. Kotropoulos, “A tensor-based approach for automatic music genre classification,” in Signal Processing Conference, 2008 16th European. IEEE, 2008, pp. 1–4": "b22",
"23. H. Srinivasan and M. Kankanhalli, “Harmonicity and dynamics-based features for audio,” in Acoustics, Speech, and Signal Processing, 2004. Proceedings.(ICASSP’04). IEEE International Conference on, vol. 4. IEEE, 2004, pp. iv–321.": "b23",
"24. C. N. Silla Jr, A. L. Koerich, and C. A. Kaestner, “The latin music database.” in ISMIR, 2008, pp. 451–456.": "b24"
}
]
}