2016
Journal Articles
Emmanuel Vincent; Shinji Watanabe; Aditya Arie Nugraha; Jon Barker; Ricard Marxer: An analysis of environment, microphone and data simulation mismatches in robust speech recognition. Journal Article. Computer Speech & Language, 2016. DOI: 10.1016/j.csl.2016.11.005. URL: http://www.sciencedirect.com/science/article/pii/S0885230816301231, https://hal.inria.fr/hal-01399180

Speech enhancement and automatic speech recognition (ASR) are most often evaluated in matched (or multi-condition) settings where the acoustic conditions of the training data match (or cover) those of the test data. Few studies have systematically assessed the impact of acoustic mismatches between training and test data, especially concerning recent speech enhancement and state-of-the-art ASR techniques. In this article, we study this issue in the context of the CHiME-3 dataset, which consists of sentences spoken by talkers situated in challenging noisy environments and recorded using a 6-channel tablet-based microphone array. We provide a critical analysis of the results published on this dataset for various signal enhancement, feature extraction, and ASR backend techniques, and perform a number of new experiments in order to separately assess the impact of different noise environments, different numbers and positions of microphones, and simulated vs. real data on speech enhancement and ASR performance. We show that, with the exception of minimum variance distortionless response (MVDR) beamforming, most algorithms perform consistently on real and simulated data and can benefit from training on simulated data. We also find that training on different noise environments and different microphones barely affects the ASR performance, especially when several environments are present in the training data: only the number of microphones has a significant impact. Based on these results, we introduce the CHiME-4 Speech Separation and Recognition Challenge, which revisits the CHiME-3 dataset and makes it more challenging by reducing the number of microphones available for testing.
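The abstract singles out MVDR beamforming as the one technique whose behavior differs between real and simulated data. For readers unfamiliar with it, here is a minimal NumPy sketch of the standard per-frequency MVDR weight computation; the function name, the per-bin interface, and the diagonal loading term are illustrative assumptions, not code from the paper.

```python
import numpy as np

def mvdr_weights(Rn, d, loading=1e-6):
    """Standard MVDR beamformer weights for one frequency bin.

    Rn : (I, I) complex noise spatial covariance estimate
    d  : (I,)   complex steering vector toward the target speaker
    Returns w = Rn^{-1} d / (d^H Rn^{-1} d), so that y = w^H x
    passes the target undistorted while minimizing noise power.
    """
    I = len(d)
    Rn = Rn + loading * np.trace(Rn).real / I * np.eye(I)  # diagonal loading (assumed)
    Rn_inv_d = np.linalg.solve(Rn, d)                      # Rn^{-1} d without explicit inverse
    return Rn_inv_d / (d.conj() @ Rn_inv_d)
```

Since MVDR relies on accurate steering vectors and noise covariance estimates, errors in these quantities on real recordings plausibly explain why it behaves differently on real versus simulated data.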
Aditya Arie Nugraha; Antoine Liutkus; Emmanuel Vincent: Multichannel audio source separation with deep neural networks. Journal Article. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 24(9), pp. 1652-1664, 2016, ISSN: 2329-9290. DOI: 10.1109/TASLP.2016.2580946. URL: http://ieeexplore.ieee.org/document/7492604, https://hal.inria.fr/hal-01163369

This article addresses the problem of multichannel audio source separation. We propose a framework where deep neural networks (DNNs) are used to model the source spectra and combined with the classical multichannel Gaussian model to exploit the spatial information. The parameters are estimated in an iterative expectation-maximization (EM) fashion and used to derive a multichannel Wiener filter. We present an extensive experimental study to show the impact of different design choices on the performance of the proposed technique. We consider different cost functions for the training of DNNs, namely the probabilistically motivated Itakura-Saito divergence, as well as Kullback-Leibler, Cauchy, mean squared error, and phase-sensitive cost functions. We also study the number of EM iterations and the use of multiple DNNs, where each DNN aims to improve the spectra estimated by the preceding EM iteration. Finally, we present its application to a speech enhancement problem. The experimental results show the benefit of the proposed multichannel approach over a single-channel DNN-based approach and the conventional multichannel nonnegative matrix factorization based iterative EM algorithm.
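Under the multichannel Gaussian model used here, each source image at a time-frequency bin is zero-mean complex Gaussian with covariance v_j(f,n) R_j(f), where v_j is the spectrum (here estimated by a DNN) and R_j the spatial covariance. Below is a minimal NumPy sketch of the resulting multichannel Wiener filtering step only, taking v and R as given; all names and the regularization constant are assumptions for illustration, and the EM re-estimation of v and R is not shown.

```python
import numpy as np

def multichannel_wiener_filter(x, v, R):
    """Multichannel Wiener filtering under the local Gaussian model.

    x : (F, N, I) complex STFT of the I-channel mixture
    v : (J, F, N) nonnegative source spectra (e.g., DNN outputs)
    R : (J, F, I, I) spatial covariance matrix of each source
    Returns c : (J, F, N, I) estimated multichannel source images.
    """
    F, N, I = x.shape
    J = v.shape[0]
    c = np.zeros((J, F, N, I), dtype=complex)
    for f in range(F):
        for n in range(N):
            # Mixture covariance at this bin: sum_j v_j(f,n) R_j(f)
            Rx = sum(v[j, f, n] * R[j, f] for j in range(J))
            Rx_inv = np.linalg.inv(Rx + 1e-9 * np.eye(I))  # small regularization (assumed)
            for j in range(J):
                # Wiener gain W_j = v_j R_j Rx^{-1}; source image estimate c_j = W_j x
                W = v[j, f, n] * R[j, f] @ Rx_inv
                c[j, f, n] = W @ x[f, n]
    return c
```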
Inproceedings
Aditya Arie Nugraha; Antoine Liutkus; Emmanuel Vincent: Multichannel music separation with deep neural networks. Inproceedings. 2016 European Signal Processing Conference (EUSIPCO), pp. 1748-1752, Budapest, Hungary, 2016. DOI: 10.1109/EUSIPCO.2016.7760548. URL: http://ieeexplore.ieee.org/document/7760548/, https://hal.inria.fr/hal-01334614

This article addresses the problem of multichannel music separation. We propose a framework where the source spectra are estimated using deep neural networks and combined with spatial covariance matrices to encode the source spatial characteristics. The parameters are estimated in an iterative expectation-maximization fashion and used to derive a multichannel Wiener filter. We evaluate the proposed framework for the task of music separation on a large dataset. Experimental results show that the method we describe performs consistently well in separating singing voice and other instruments from realistic musical mixtures.
2015
Inproceedings
Sunit Sivasankaran; Aditya Arie Nugraha; Emmanuel Vincent; Juan Andrés Morales Cordovilla; Siddharth Dalmia; Irina Illina; Antoine Liutkus: Robust ASR using neural network based speech enhancement and feature simulation. Inproceedings. 2015 IEEE Workshop on Automatic Speech Recognition and Understanding (ASRU), pp. 482-489, Scottsdale, AZ, USA, 2015. DOI: 10.1109/ASRU.2015.7404834. URL: http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=7404834, https://hal.inria.fr/hal-01204553

We consider the problem of robust automatic speech recognition (ASR) in the context of the CHiME-3 Challenge. The proposed system combines three contributions. First, we propose a deep neural network (DNN) based multichannel speech enhancement technique, where the speech and noise spectra are estimated using a DNN-based regressor and the spatial parameters are derived in an expectation-maximization (EM)-like fashion. Second, a conditional restricted Boltzmann machine (CRBM) model is trained using the obtained enhanced speech and used to generate simulated training and development datasets. The goal is to increase the similarity between simulated and real data, so as to increase the benefit of multicondition training. Finally, we make some changes to the ASR backend. Our system ranked 4th among 25 entries.
2014
Journal Articles
Aditya Arie Nugraha; Kazumasa Yamamoto; Seiichi Nakagawa: Single-channel dereverberation by feature mapping using cascade neural networks for robust distant speaker identification and speech recognition. Journal Article. EURASIP Journal on Audio, Speech, and Music Processing, 2014(13), pp. 1-31, 2014, ISSN: 1687-4722. DOI: 10.1186/1687-4722-2014-13. URL: http://asmp.eurasipjournals.springeropen.com/articles/10.1186/1687-4722-2014-13

We present a feature enhancement method that uses neural networks (NNs) to map the reverberant feature in a log-melspectral domain to its corresponding anechoic feature. The mapping is done by cascade NNs trained using the Cascade2 algorithm with an implementation of segment-based normalization. Experiments using speaker identification (SID) and automatic speech recognition (ASR) systems were conducted to evaluate the method. The SID experiments were conducted using our own simulated and real reverberant datasets, while the CENSREC-4 evaluation framework was used to evaluate the ASR system. The proposed method could remarkably improve the performance of both systems using limited stereo data and low speaker-variant data as the training data. In the SID evaluation, we reached 26.0% and 34.8% error rate reduction (ERR) relative to the baseline on simulated and real data, respectively, using only one pair of utterances for matched-condition cases. Then, using a combined dataset containing 15 pairs of utterances by one speaker from three positions in a room, we reached a 93.7% average identification rate (three known and two unknown positions), which was a 42.2% ERR relative to the use of cepstral mean normalization (CMN). In the ASR evaluation, using 40 pairs of utterances as the NN training data, we reached a 78.4% ERR relative to the baseline using simulated utterances by five speakers. Moreover, we reached 75.4% and 71.6% ERR relative to the baseline using real utterances by five speakers and one speaker, respectively.
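The enhancement step is a supervised feature-mapping regression: time-aligned (reverberant, anechoic) log-mel feature pairs from stereo data train a network that predicts the anechoic features. The paper grows its networks constructively with the Cascade2 algorithm; the sketch below substitutes a fixed-topology scikit-learn MLP and random placeholder data purely to illustrate the setup, and the zero-mean/unit-variance form of the segment-based normalization is an assumption.

```python
import numpy as np
from sklearn.neural_network import MLPRegressor

# Placeholder stereo data: time-aligned (reverberant, anechoic) log-mel frames.
# Real data would come from simultaneous close-talk and distant recordings.
rng = np.random.default_rng(0)
X_reverb = rng.standard_normal((1000, 24))  # 24-dim reverberant log-mel features
Y_clean = rng.standard_normal((1000, 24))   # corresponding anechoic features

def normalize(F):
    # Segment-based normalization, assumed here as per-segment mean/variance scaling
    return (F - F.mean(axis=0)) / (F.std(axis=0) + 1e-8)

# Fixed-topology MLP stands in for the cascade (Cascade2) networks of the paper
mapper = MLPRegressor(hidden_layer_sizes=(64,), max_iter=500)
mapper.fit(normalize(X_reverb), normalize(Y_clean))
enhanced = mapper.predict(normalize(X_reverb))  # dereverberated feature estimates
```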
2013
Inproceedings
Aditya Arie Nugraha; Kazumasa Yamamoto; Seiichi Nakagawa: Single channel dereverberation method in log-melspectral domain using limited stereo data for distant speaker identification. Inproceedings. 2013 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA), Kaohsiung, Taiwan, 2013. DOI: 10.1109/APSIPA.2013.6694117. URL: http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6694117

In this paper, we present a feature enhancement method that uses neural networks (NNs) to map the reverberant feature in a log-melspectral domain to its corresponding anechoic feature. The mapping is done by cascade NNs trained using the Cascade2 algorithm with an implementation of segment-based normalization. We assumed that the feature dimensions were independent of each other and experimented with several assumptions on the room transfer function for each dimension. A speaker identification system was used to evaluate the method. Using limited stereo data, we could improve the identification rate on both simulated and real datasets. On the simulated dataset, we showed that the proposed method is effective for both noiseless and noisy reverberant environments with various noise and reverberation characteristics. On the real dataset, we showed that using a configuration of 6 independent NNs for the 24-dimensional feature and only 1 pair of utterances, we could obtain a 35% average error reduction relative to the baseline, which employed cepstral mean normalization (CMN).
Technical Reports
Aditya Arie Nugraha; Kazumasa Yamamoto; Seiichi Nakagawa: Single channel dereverberation method by feature mapping using limited stereo data. Technical Report. Institute of Electronics, Information and Communication Engineers (IEICE), SP2013-54, pp. 7-12, 2013. URL: http://www.ieice.org/ken/paper/20130725FB4S/eng/

In this paper, we present a feature enhancement method that uses neural networks (NNs) to map the reverberant feature in a log-melspectral domain to its corresponding anechoic feature. The mapping is done by cascade NNs trained using the Cascade2 algorithm with an implementation of segment-based normalization. Experiments using speaker identification (SID) and automatic speech recognition (ASR) systems were conducted to evaluate the method. The SID experiments were conducted using real noisy reverberant datasets, while the CENSREC-4 evaluation framework was used to evaluate the ASR system. Using limited stereo data consisting of simultaneously recorded clean speech and reverberant speech, the proposed method could remarkably improve the performance of both systems.
2012
Inproceedings
Aditya Arie Nugraha; Seiichi Nakagawa: Improving distant speaker identification robustness using a nonlinear regression based dereverberation method in feature domain. Inproceedings. 2012 Autumn meeting of the Acoustical Society of Japan, pp. 163-166, Nagano, Japan, 2012.
2011
Inproceedings
Yudi Satria Gondokaryono; Yoanes Bandung; Joko Ari Wibowo; Aditya Arie Nugraha; Bryan Yonathan; Dwi Ramadhianto Irawan: Performance evaluation of audio-video streaming service in Keerom, Papua using integrated audio-video performance test tool. Inproceedings. 2011 6th International Conference on Telecommunication Systems, Services, and Applications (TSSA), pp. 145-148, 2011, ISBN: 978-1-4577-1441-2. DOI: 10.1109/TSSA.2011.6095423. URL: http://ieeexplore.ieee.org/xpl/articleDetails.jsp?reload=true&arnumber=6095423

This study compared several video codecs, audio codecs, and audio and video bit rates to determine the quality of the audio-video streaming service on the Keerom, Papua network, whose average capacity is 1.5 Mbps. MPEG audio and AC-3 were chosen as audio codecs because of their characteristics, while the video codecs were MPEG-4 and H.264. Audio bit rates of 64 and 128 kbps and video bit rates of 64, 128, and 256 kbps were used. The experimental results show that the quality of the audio-video streaming service was best with MPEG audio at 64 kbps and MPEG-4 video at 256 kbps. The test results will be used as a reference for a later implementation of the audio-video streaming service on the Keerom, Papua network.
2010
Inproceedings
Aditya Arie Nugraha; Andik Taufiq; Nur Ichsan Utama; Ary Setijadi Prihatmanto: Smart Assistant for Museum’s Objects Navigation (SAMsON). Inproceedings. 5th AOTULE International Postgraduate Students Conference on Engineering, pp. 186-189, 2010, ISBN: 978-9-7913-4491-3.

In this paper, we propose the idea of the Smart Assistant for Museum’s Objects Navigation (SAMsON). SAMsON is a mobile multimodal spoken dialogue system to be used by museum visitors for exploring the museum and learning about its collection. SAMsON has three main features: (1) Collection Browser, (2) Object Recognizer, and (3) Virtual Guide. The Collection Browser provides information about the museum, its floor maps, and its collections. Visitors can also search for a particular object using the browser, which gives a brief description of the object and its location in the museum. The Object Recognizer is used when visitors want to know the details of a particular object: they only need to capture an image of the object, and SAMsON provides information on the mobile device screen using augmented reality (AR) technology. Both the Collection Browser and the Object Recognizer also use speech, besides the graphical user interface (GUI), to deliver information to visitors. Meanwhile, the Virtual Guide acts as a personal assistant that can communicate with visitors using speech. SAMsON is also supported by a wireless network (Wi-Fi), to provide connectivity for the mobile device, and a wireless sensor network (RFID), to determine the profile and location of visitors.
Aditya Arie Nugraha; Bryan Yonathan; Yoanes Bandung; Armein Z. R. Langi: Challenges in the implementation of rural digital learning service: Case study of testbed network in Keerom, Papua. Inproceedings. e-Indonesia Initiatives Forum VI, pp. 27-31, 2010, ISBN: 978-6-0296-9070-5 (in Indonesian). URL: https://qjournal.id/jurnal/paper/0001600018/Tantangan-dalam-Implementasi-Layanan-Digital-Learning-Pedesaan-Studi-Kasus-Jaringan-Testbed-Keerom-Papua
Yoanes Bandung; Aditya Arie Nugraha; Bryan Yonathan; Armein Z. R. Langi; G. A. Putri Saptawati; Dwi Hendratmo Widyantoro; Agus Fany; Liliasari: The design of digital learning product-service system for rural elementary schools’ teachers. Inproceedings. e-Indonesia Initiatives Forum VI, pp. 1-4, 2010, ISBN: 978-6-0296-9070-5 (in Indonesian). URL: https://qjournal.id/jurnal/paper/0001600009/Perancangan-Sistem-Produk-Layanan-Komunitas-Guru-Belajar-untuk-Sekolah-Dasar-di-Pedesaan
Bryan Yonathan; Aditya Arie Nugraha; Yoanes Bandung; Armein Z. R. Langi: Virtual class service for supporting rural digital learning: Case study of testbed network in Keerom, Papua. Inproceedings. e-Indonesia Initiatives Forum VI, pp. 21-26, 2010, ISBN: 978-6-0296-9070-5 (in Indonesian). URL: https://qjournal.id/jurnal/paper/0001600017/Layanan-Kelas-Virtual-dengan-Multimedia-Streaming-untuk-Mendukung-Digital-Learning-Pedesaan-Studi-Kasus-Keerom-Papua
Adhi Setyo Santoso; Dwi Ramadhianto Irawan; Bryan Yonathan; Aditya Arie Nugraha; Yoanes Bandung; Armein Z. R. Langi: The design of web-based bandwidth management system for rural digital learning networks. Inproceedings. e-Indonesia Initiatives Forum VI, pp. 45-50, 2010, ISBN: 978-6-0296-9070-5 (in Indonesian). URL: https://qjournal.id/jurnal/paper/0001600025/Perancangan-Sistem-Manajemen-Bandwidth-Berbasis-Web-untuk-Jaringan-Digital-learning-Pedesaan
Journal Articles
Aska Narendra; Aditya Arie Nugraha; Yoanes Bandung; Armein Z. R. Langi; Bambang Pharmasetiawan: Web based multimedia conference system for digital learning in rural elementary school. Journal Article. Advances in Electrical Engineering and Informatics, III, pp. 97-104, 2010.

This paper describes the process of designing a web-based multimedia conferencing system to support digital learning for elementary schools in rural areas, and its implementation on several network testbeds in Bandung, Subang, and Cianjur. The system must be able to send each of the constituent media, namely video, audio, and other materials (e.g. slide presentations), independently, so that the learning process between student and teacher can still run even if one of the media is absent. In addition, the multimedia conferencing system must be easy for a rural elementary school teacher with minimal computer skills to operate independently. The result is a product that is expected to improve the quality of primary education, especially in rural areas, through ICT applications.