% references.bib
@article{FoMo-Bench,
title={FoMo-Bench: a multi-modal, multi-scale and multi-task Forest Monitoring Benchmark for remote sensing foundation models},
author={Bountos, Nikolaos Ioannis and Ouaknine, Arthur and Rolnick, David},
journal={arXiv preprint arXiv:2312.10114},
year={2023},
url={https://arxiv.org/abs/2312.10114}
}
@misc{OpenForest,
title={OpenForest: A data catalogue for machine learning in forest monitoring},
author={Arthur Ouaknine and Teja Kattenborn and Etienne Laliberté and David Rolnick},
year={2023},
eprint={2311.00277},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
@Article{DeepForestBefore,
AUTHOR = {Weinstein, Ben G. and Marconi, Sergio and Bohlman, Stephanie and Zare, Alina and White, Ethan},
TITLE = {Individual Tree-Crown Detection in RGB Imagery Using Semi-Supervised Deep Learning Neural Networks},
JOURNAL = {Remote Sensing},
VOLUME = {11},
YEAR = {2019},
NUMBER = {11},
ARTICLE-NUMBER = {1309},
ISSN = {2072-4292},
ABSTRACT = {Remote sensing can transform the speed, scale, and cost of biodiversity and forestry surveys. Data acquisition currently outpaces the ability to identify individual organisms in high resolution imagery. We outline an approach for identifying tree-crowns in RGB imagery while using a semi-supervised deep learning detection network. Individual crown delineation has been a long-standing challenge in remote sensing and available algorithms produce mixed results. We show that deep learning models can leverage existing Light Detection and Ranging (LIDAR)-based unsupervised delineation to generate trees that are used for training an initial RGB crown detection model. Despite limitations in the original unsupervised detection approach, this noisy training data may contain information from which the neural network can learn initial tree features. We then refine the initial model using a small number of higher-quality hand-annotated RGB images. We validate our proposed approach while using an open-canopy site in the National Ecological Observation Network. Our results show that a model using 434,551 self-generated trees with the addition of 2848 hand-annotated trees yields accurate predictions in natural landscapes. Using an intersection-over-union threshold of 0.5, the full model had an average tree crown recall of 0.69, with a precision of 0.61 for the visually-annotated data. The model had an average tree detection rate of 0.82 for the field collected stems. The addition of a small number of hand-annotated trees improved the performance over the initial self-supervised model. This semi-supervised deep learning approach demonstrates that remote sensing can overcome a lack of labeled training data by generating noisy data for initial training using unsupervised methods and retraining the resulting models with high quality labeled data.},
DOI = {10.3390/rs11111309}
}
@misc{ReforesTree,
title={ReforesTree: A Dataset for Estimating Tropical Forest Carbon Stock with Deep Learning and Aerial Imagery},
author={Gyri Reiersen and David Dao and Björn Lütjens and Konstantin Klemmer and Kenza Amara and Attila Steinegger and Ce Zhang and Xiaoxiang Zhu},
year={2022},
eprint={2201.11192},
archivePrefix={arXiv},
primaryClass={cs.CV},
}
@misc{FOR-instance,
title={FOR-instance: a UAV laser scanning benchmark dataset for semantic and instance segmentation of individual trees},
author={Stefano Puliti and Grant Pearse and Peter Surový and Luke Wallace and Markus Hollaus and Maciej Wielgosz and Rasmus Astrup},
year={2023},
eprint={2309.01279},
archivePrefix={arXiv},
primaryClass={cs.CV},
}
@Article{MDAS,
AUTHOR = {Hu, J. and Liu, R. and Hong, D. and Camero, A. and Yao, J. and Schneider, M. and Kurz, F. and Segl, K. and Zhu, X. X.},
TITLE = {MDAS: a new multimodal benchmark dataset for remote sensing},
JOURNAL = {Earth System Science Data},
VOLUME = {15},
YEAR = {2023},
NUMBER = {1},
PAGES = {113--131},
DOI = {10.5194/essd-15-113-2023}
}
@article{TALLO,
author = {Jucker, Tommaso and Fischer, Fabian Jörg and Chave, Jérôme and Coomes, David A. and Caspersen, John and Ali, Arshad and Loubota Panzou, Grace Jopaul and Feldpausch, Ted R. and Falster, Daniel and Usoltsev, Vladimir A. and Adu-Bredu, Stephen and Alves, Luciana F. and Aminpour, Mohammad and Angoboy, Ilondea B. and Anten, Niels P. R. and Antin, Cécile and Askari, Yousef and Muñoz, Rodrigo and Ayyappan, Narayanan and Balvanera, Patricia and Banin, Lindsay and Barbier, Nicolas and Battles, John J. and Beeckman, Hans and Bocko, Yannick E. and Bond-Lamberty, Ben and Bongers, Frans and Bowers, Samuel and Brade, Thomas and van Breugel, Michiel and Chantrain, Arthur and Chaudhary, Rajeev and Dai, Jingyu and Dalponte, Michele and Dimobe, Kangbéni and Domec, Jean-Christophe and Doucet, Jean-Louis and Duursma, Remko A. and Enríquez, Moisés and van Ewijk, Karin Y. and Farfán-Rios, William and Fayolle, Adeline and Forni, Eric and Forrester, David I. and Gilani, Hammad and Godlee, John L. and Gourlet-Fleury, Sylvie and Haeni, Matthias and Hall, Jefferson S. and He, Jie-Kun and Hemp, Andreas and Hernández-Stefanoni, José L. and Higgins, Steven I. and Holdaway, Robert J. and Hussain, Kiramat and Hutley, Lindsay B. and Ichie, Tomoaki and Iida, Yoshiko and Jiang, Hai-sheng and Joshi, Puspa Raj and Kaboli, Hasan and Larsary, Maryam Kazempour and Kenzo, Tanaka and Kloeppel, Brian D. and Kohyama, Takashi and Kunwar, Suwash and Kuyah, Shem and Kvasnica, Jakub and Lin, Siliang and Lines, Emily R. and Liu, Hongyan and Lorimer, Craig and Loumeto, Jean-Joël and Malhi, Yadvinder and Marshall, Peter L. and Mattsson, Eskil and Matula, Radim and Meave, Jorge A. and Mensah, Sylvanus and Mi, Xiangcheng and Momo, Stéphane and Moncrieff, Glenn R. and Mora, Francisco and Nissanka, Sarath P. and O'Hara, Kevin L. and Pearce, Steven and Pelissier, Raphaël and Peri, Pablo L. and Ploton, Pierre and Poorter, Lourens and Pour, Mohsen Javanmiri and Pourbabaei, Hassan and Dupuy-Rada, Juan Manuel and Ribeiro, Sabina C. and Ryan, Casey and Sanaei, Anvar and Sanger, Jennifer and Schlund, Michael and Sellan, Giacomo and Shenkin, Alexander and Sonké, Bonaventure and Sterck, Frank J. and Svátek, Martin and Takagi, Kentaro and Trugman, Anna T. and Ullah, Farman and Vadeboncoeur, Matthew A. and Valipour, Ahmad and Vanderwel, Mark C. and Vovides, Alejandra G. and Wang, Weiwei and Wang, Li-Qiu and Wirth, Christian and Woods, Murray and Xiang, Wenhua and Ximenes, Fabiano de Aquino and Xu, Yaozhan and Yamada, Toshihiro and Zavala, Miguel A.},
title = {Tallo: A global tree allometry and crown architecture database},
journal = {Global Change Biology},
volume = {28},
number = {17},
pages = {5254--5268},
keywords = {allometric scaling, crown radius, forest biomass stocks, forest ecology, remote sensing, stem diameter, tree height},
doi = {10.1111/gcb.16302},
abstract = {Data capturing multiple axes of tree size and shape, such as a tree's stem diameter, height and crown size, underpin a wide range of ecological research—from developing and testing theory on forest structure and dynamics, to estimating forest carbon stocks and their uncertainties, and integrating remote sensing imagery into forest monitoring programmes. However, these data can be surprisingly hard to come by, particularly for certain regions of the world and for specific taxonomic groups, posing a real barrier to progress in these fields. To overcome this challenge, we developed the Tallo database, a collection of 498,838 georeferenced and taxonomically standardized records of individual trees for which stem diameter, height and/or crown radius have been measured. These data were collected at 61,856 globally distributed sites, spanning all major forested and non-forested biomes. The majority of trees in the database are identified to species (88\%), and collectively Tallo includes data for 5163 species distributed across 1453 genera and 187 plant families. The database is publicly archived under a CC-BY 4.0 licence and can be accessed from: https://doi.org/10.5281/zenodo.6637599. To demonstrate its value, here we present three case studies that highlight how the Tallo database can be used to address a range of theoretical and applied questions in ecology—from testing the predictions of metabolic scaling theory, to exploring the limits of tree allometric plasticity along environmental gradients and modelling global variation in maximum attainable tree height. In doing so, we provide a key resource for field ecologists, remote sensing researchers and the modelling community working together to better understand the role that trees play in regulating the terrestrial carbon cycle.},
year = {2022}
}
@misc{MillionTrees,
author = {Ben Weinstein},
title = {MillionTrees},
year = 2023,
url = {https://milliontrees.idtrees.org/},
note = {Accessed: 2024-07-08}
}
@misc{WildForest3D,
title={Multi-Layer Modeling of Dense Vegetation from Aerial LiDAR Scans},
author={Ekaterina Kalinicheva and Loic Landrieu and Clément Mallet and Nesrine Chehata},
year={2022},
eprint={2204.11620},
archivePrefix={arXiv},
primaryClass={cs.CV},
}
@InProceedings{sortedAP,
author = {Chen, Long and Wu, Yuli and Stegmaier, Johannes and Merhof, Dorit},
title = {SortedAP: Rethinking Evaluation Metrics for Instance Segmentation},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops},
year = {2023},
month = {10},
pages = {3923--3929},
url = {https://openaccess.thecvf.com/content/ICCV2023W/BIC/html/Chen_SortedAP_Rethinking_Evaluation_Metrics_for_Instance_Segmentation_ICCVW_2023_paper.html}
}
@misc{AHN4,
author = {{Actueel Hoogtebestand Nederland}},
title = {{AHN4 - Actual Height Model of the Netherlands}},
year = 2020,
url = {https://www.ahn.nl/},
note = {Accessed: 2024-07-09}
}
@misc{Luchtfotos,
author = {{Beeldmateriaal Nederland}},
title = {{Luchtfoto's (Aerial Photographs)}},
year = 2024,
url = {https://www.beeldmateriaal.nl/luchtfotos},
note = {Accessed: 2024-07-09}
}
@misc{IGN_LiDAR_HD,
author = {{Institut national de l'information géographique et forestière (IGN)}},
title = {{LiDAR HD}},
year = 2020,
url = {https://geoservices.ign.fr/lidarhd},
note = {Accessed: 2024-07-09}
}
@misc{IGN_BD_ORTHO,
author = {{Institut national de l'information géographique et forestière (IGN)}},
title = {{BD ORTHO}},
year = 2021,
url = {https://geoservices.ign.fr/bdortho},
note = {Accessed: 2024-07-09}
}
@misc{amsterdam_trees,
author = {{Gemeente Amsterdam}},
title = {{Bomenbestand Amsterdam (Amsterdam Tree Dataset)}},
year = 2024,
url = {https://maps.amsterdam.nl/open_geodata/?k=505},
note = {Accessed: 2024-07-09}
}
@misc{bordeaux_trees,
author = {{Bordeaux Métropole}},
title = {{Patrimoine arboré de Bordeaux Métropole (Tree Heritage of Bordeaux Metropole)}},
year = 2024,
url = {https://opendata.bordeaux-metropole.fr/explore/dataset/ec_arbre_p/information/?disjunctive.insee},
note = {Accessed: 2024-07-09}
}
@misc{boomregister,
author = {{Coöperatief Boomregister U.A.}},
title = {{Boom Register (Tree Register)}},
year = 2014,
url = {https://boomregister.nl/},
note = {Accessed: 2024-07-11}
}
@article{urban-trees,
title = {Challenges for computer vision as a tool for screening urban trees through street-view images},
journal = {Urban Forestry \& Urban Greening},
volume = {95},
pages = {128316},
year = {2024},
issn = {1618-8667},
doi = {10.1016/j.ufug.2024.128316},
author = {Tito Arevalo-Ramirez and Anali Alfaro and José Figueroa and Mauricio Ponce-Donoso and Jose M. Saavedra and Matías Recabarren and José Delpiano},
keywords = {Urban tree, Computer vision, Deep learning},
abstract = {Urban forests play a fundamental and irreplaceable role within cities through the ecosystem services they provide, such as carbon capture. However, inadequate management of urban trees can heighten the risks they pose to society. For instance, mechanical failures of tree components, such as branches, can cause harm to individuals and property. Regular assessments of tree conditions are necessary to mitigate these tree-related hazards, yet such evaluations are labor-intensive and currently lack automation. Previous studies have proposed utilizing street view images to alleviate tree inspection and shown the feasibility of visually inspecting trees. However, only a limited number of studies have addressed the automatic evaluation of urban trees, a challenge that can potentially be addressed using deep learning networks. Particularly in urban environments, there is a pressing need for increased automation in unresolved computer vision tasks. Therefore, this research presents a comprehensive analysis of neural networks and publicly available datasets that can aid arborists in automatically identifying urban trees. Specifically, we investigate the potential of deep learning networks in classifying tree genera and segmenting individual trees and their trunks. We emphasize the utilization of transfer learning strategies to enhance tree identification. The results demonstrate that neural networks can be considered practical tools for assisting arborists in tree recognition. Nevertheless, there are still gaps that remain and require attention in future research endeavors.}
}
@article{olive-tree,
author = {Safonova, Anastasiia and Guirado, Emilio and Maglinets, Yuriy and Alcaraz-Segura, Domingo and Tabik, Siham},
title = {Olive Tree Biovolume from UAV Multi-Resolution Image Segmentation with Mask R-CNN},
journal = {Sensors},
volume = {21},
year = {2021},
number = {5},
pages = {1617},
pmid = {33668984},
issn = {1424-8220},
doi = {10.3390/s21051617},
abstract = {Olive tree growing is an important economic activity in many countries, mostly in the Mediterranean Basin, Argentina, Chile, Australia, and California. Although recent intensification techniques organize olive groves in hedgerows, most olive groves are rainfed and the trees are scattered (as in Spain and Italy, which account for 50\% of the world’s olive oil production). Accurate measurement of trees biovolume is a first step to monitor their performance in olive production and health. In this work, we use one of the most accurate deep learning instance segmentation methods (Mask R-CNN) and unmanned aerial vehicles (UAV) images for olive tree crown and shadow segmentation (OTCS) to further estimate the biovolume of individual trees. We evaluated our approach on images with different spectral bands (red, green, blue, and near infrared) and vegetation indices (normalized difference vegetation index—NDVI—and green normalized difference vegetation index—GNDVI). The performance of red-green-blue (RGB) images were assessed at two spatial resolutions 3 cm/pixel and 13 cm/pixel, while NDVI and GNDV images were only at 13 cm/pixel. All trained Mask R-CNN-based models showed high performance in the tree crown segmentation, particularly when using the fusion of all dataset in GNDVI and NDVI (F1-measure from 95\% to 98\%). The comparison in a subset of trees of our estimated biovolume with ground truth measurements showed an average accuracy of 82\%. Our results support the use of NDVI and GNDVI spectral indices for the accurate estimation of the biovolume of scattered trees, such as olive trees, in UAV images.}
}
@Article{amf_gd_yolov8,
AUTHOR = {Zhong, Hao and Zhang, Zheyu and Liu, Haoran and Wu, Jinzhuo and Lin, Wenshu},
TITLE = {Individual Tree Species Identification for Complex Coniferous and Broad-Leaved Mixed Forests Based on Deep Learning Combined with UAV LiDAR Data and RGB Images},
JOURNAL = {Forests},
VOLUME = {15},
YEAR = {2024},
NUMBER = {2},
ARTICLE-NUMBER = {293},
ISSN = {1999-4907},
ABSTRACT = {Automatic and accurate individual tree species identification is essential for the realization of smart forestry. Although existing studies have used unmanned aerial vehicle (UAV) remote sensing data for individual tree species identification, the effects of different spatial resolutions and combining multi-source remote sensing data for automatic individual tree species identification using deep learning methods still require further exploration, especially in complex forest conditions. Therefore, this study proposed an improved YOLOv8 model for individual tree species identification using multisource remote sensing data under complex forest stand conditions. Firstly, the RGB and LiDAR data of natural coniferous and broad-leaved mixed forests under complex conditions in Northeast China were acquired via a UAV. Then, different spatial resolutions, scales, and band combinations of multisource remote sensing data were explored, based on the YOLOv8 model for tree species identification. Subsequently, the Attention Multi-level Fusion (AMF) Gather-and-Distribute (GD) YOLOv8 model was proposed, according to the characteristics of the multisource remote sensing forest data, in which the two branches of the AMF Net backbone were able to extract and fuse features from multisource remote sensing data sources separately. Meanwhile, the GD mechanism was introduced into the neck of the model, in order to fully utilize the extracted features of the main trunk and complete the identification of eight individual tree species in the study area. The results showed that the YOLOv8x model based on RGB images combined with current mainstream object detection algorithms achieved the highest mAP of 75.3\%. When the spatial resolution was within 8 cm, the accuracy of individual tree species identification exhibited only a slight variation. However, the accuracy decreased significantly with the decrease of spatial resolution when the resolution was greater than 15 cm. The identification results of different YOLOv8 scales showed that x, l, and m scales could exhibit higher accuracy compared with other scales. The DGB and PCA-D band combinations were superior to other band combinations for individual tree identification, with mAP of 75.5\% and 76.2\%, respectively. The proposed AMF GD YOLOv8 model had a more significant improvement in tree species identification accuracy than a single remote sensing sources and band combinations data, with a mAP of 81.0\%. The study results clarified the impact of spatial resolution on individual tree species identification and demonstrated the excellent performance of the proposed AMF GD YOLOv8 model in individual tree species identification, which provides a new solution and technical reference for forestry resource investigation combined multisource remote sensing data.},
DOI = {10.3390/f15020293}
}
@Article{lidar_benchmark,
AUTHOR = {Eysn, Lothar and Hollaus, Markus and Lindberg, Eva and Berger, Frédéric and Monnet, Jean-Matthieu and Dalponte, Michele and Kobal, Milan and Pellegrini, Marco and Lingua, Emanuele and Mongus, Domen and Pfeifer, Norbert},
TITLE = {A Benchmark of Lidar-Based Single Tree Detection Methods Using Heterogeneous Forest Data from the Alpine Space},
JOURNAL = {Forests},
VOLUME = {6},
YEAR = {2015},
NUMBER = {5},
PAGES = {1721--1747},
ISSN = {1999-4907},
ABSTRACT = {In this study, eight airborne laser scanning (ALS)-based single tree detection methods are benchmarked and investigated. The methods were applied to a unique dataset originating from different regions of the Alpine Space covering different study areas, forest types, and structures. This is the first benchmark ever performed for different forests within the Alps. The evaluation of the detection results was carried out in a reproducible way by automatically matching them to precise in situ forest inventory data using a restricted nearest neighbor detection approach. Quantitative statistical parameters such as percentages of correctly matched trees and omission and commission errors are presented. The proposed automated matching procedure presented herein shows an overall accuracy of 97\%. Method based analysis, investigations per forest type, and an overall benchmark performance are presented. The best matching rate was obtained for single-layered coniferous forests. Dominated trees were challenging for all methods. The overall performance shows a matching rate of 47\%, which is comparable to results of other benchmarks performed in the past. The study provides new insight regarding the potential and limits of tree detection with ALS and underlines some key aspects regarding the choice of method when performing single tree detection for the various forest types encountered in alpine regions.},
DOI = {10.3390/f6051721}
}
@article{rgb-dl-watershed,
author = {Maximilian Freudenberg and Paul Magdon and Nils Nölke},
title = {Individual tree crown delineation in high-resolution remote sensing images based on U-Net},
journal = {Neural Computing and Applications},
year = {2022},
volume = {34},
number = {24},
pages = {22197--22207},
doi = {10.1007/s00521-022-07640-4},
abstract = {We present a deep learning-based framework for individual tree crown delineation in aerial and satellite images. This is an important task, e.g., for forest yield or carbon stock estimation. In contrast to earlier work, the presented method creates irregular polygons instead of bounding boxes and also provides a tree cover mask for areas that are not separable. Furthermore, it is trainable with low amounts of training data and does not need 3D height information from, e.g., laser sensors. We tested the approach in two scenarios: (1) with 30 cm WorldView-3 satellite imagery from an urban region in Bengaluru, India, and (2) with 5 cm aerial imagery of a densely forested area near Gartow, Germany. The intersection over union between the reference and predicted tree cover mask is 71.2\% for the satellite imagery and 81.9\% for the aerial images. On the polygon level, the method reaches an accuracy of 46.3\% and a recall of 63.7\% in the satellite images and an accuracy of 52\% and recall of 66.2\% in the aerial images, which is comparable to previous works that only predicted bounding boxes. Depending on the image resolution, limitations to separate individual tree crowns occur in situations where trees are hardly separable even for human image interpreters (e.g., homogeneous canopies, very small trees). The results indicate that the presented approach can efficiently delineate individual tree crowns in high-resolution optical images. Given the high availability of such imagery, the framework provides a powerful tool for tree monitoring. The source code and pretrained weights are publicly available at https://github.com/AWF-GAUG/TreeCrownDelineation.},
issn = {1433-3058}
}
@article{DeepForest,
author = {Weinstein, Ben G. and Marconi, Sergio and Aubry-Kientz, Mélaine and Vincent, Gregoire and Senyondo, Henry and White, Ethan P.},
title = {DeepForest: A Python package for RGB deep learning tree crown delineation},
journal = {Methods in Ecology and Evolution},
volume = {11},
number = {12},
pages = {1743--1751},
keywords = {crown delineation, deep learning, forests, NEON, remote sensing, RGB, tree crowns},
doi = {10.1111/2041-210X.13472},
abstract = {Remote sensing of forested landscapes can transform the speed, scale and cost of forest research. The delineation of individual trees in remote sensing images is an essential task in forest analysis. Here we introduce a new Python package, DeepForest that detects individual trees in high resolution RGB imagery using deep learning. While deep learning has proven highly effective in a range of computer vision tasks, it requires large amounts of training data that are typically difficult to obtain in ecological studies. DeepForest overcomes this limitation by including a model pretrained on over 30 million algorithmically generated crowns from 22 forests and fine-tuned using 10,000 hand-labelled crowns from six forests. The package supports the application of this general model to new data, fine tuning the model to new datasets with user labelled crowns, training new models and evaluating model predictions. This simplifies the process of using and retraining deep learning models for a range of forests, sensors and spatial resolutions. We illustrate the workflow of DeepForest using data from the National Ecological Observatory Network, a tropical forest in French Guiana, and street trees from Portland, Oregon.},
year = {2020}
}
@misc{NEONdata,
author = {Ben Weinstein and Sergio Marconi and Ethan White},
title = {Data for the NeonTreeEvaluation Benchmark (0.2.2)},
year = {2022},
publisher = {Zenodo},
doi = {10.5281/zenodo.5914554},
}
@ARTICLE{lidar_benchmark_2,
author={Wang, Yunsheng and Hyyppä, Juha and Liang, Xinlian and Kaartinen, Harri and Yu, Xiaowei and Lindberg, Eva and Holmgren, Johan and Qin, Yuchu and Mallet, Clément and Ferraz, António and Torabzadeh, Hossein and Morsdorf, Felix and Zhu, Lingli and Liu, Jingbin and Alho, Petteri},
journal={IEEE Transactions on Geoscience and Remote Sensing},
title={International Benchmarking of the Individual Tree Detection Methods for Modeling 3-D Canopy Structure for Silviculture and Forest Ecology Using Airborne Laser Scanning},
year={2016},
volume={54},
number={9},
pages={5011--5027},
keywords={Vegetation;Benchmark testing;Three-dimensional displays;Remote sensing;Solid modeling;Geospatial analysis;Geography;Airborne laser scanning (ALS);benchmark;canopy structure;crown class;individual tree detection (ITD);LiDAR;point cloud;subordinate tree},
doi={10.1109/TGRS.2016.2543225}
}
@ARTICLE{gan_data_augment,
AUTHOR={Sun, Chenxin and Huang, Chengwei and Zhang, Huaiqing and Chen, Bangqian and An, Feng and Wang, Liwen and Yun, Ting },
TITLE={Individual Tree Crown Segmentation and Crown Width Extraction From a Heightmap Derived From Aerial Laser Scanning Data Using a Deep Learning Framework},
JOURNAL={Frontiers in Plant Science},
VOLUME={13},
YEAR={2022},
DOI={10.3389/fpls.2022.914974},
ISSN={1664-462X},
ABSTRACT={Deriving individual tree crown (ITC) information from light detection and ranging (LiDAR) data is of great significance to forest resource assessment and smart management. After proof-of-concept studies, advanced deep learning methods have been shown to have high efficiency and accuracy in remote sensing data analysis and geoscience problem solving. This study proposes a novel concept for synergetic use of the YOLO-v4 deep learning network based on heightmaps directly generated from airborne LiDAR data for ITC segmentation and a computer graphics algorithm for refinement of the segmentation results involving overlapping tree crowns. This concept overcomes the limitations experienced by existing ITC segmentation methods that use aerial photographs to obtain texture and crown appearance information and commonly encounter interference due to heterogeneous solar illumination intensities or interlacing branches and leaves. Three generative adversarial networks (WGAN, CycleGAN, and SinGAN) were employed to generate synthetic images. These images were coupled with manually labeled training samples to train the network. Three forest plots, namely, a tree nursery, forest landscape and mixed tree plantation, were used to verify the effectiveness of our approach. The results showed that the overall recall of our method for detecting ITCs in the three forest plot types reached 83.6\%, with an overall precision of 81.4\%. Compared with reference field measurement data, the coefficient of determination $R^2$ was ≥ 79.93\% for tree crown width estimation, and the accuracy of our deep learning method was not influenced by the values of key parameters, yielding 3.9\% greater accuracy than the traditional watershed method. The results demonstrate an enhancement of tree crown segmentation in the form of a heightmap for different forest plot types using the concept of deep learning, and our method bypasses the visual complications arising from aerial images featuring diverse textures and unordered scanned points with irregular geometrical properties.}
}
@Article{albumentations,
AUTHOR = {Buslaev, Alexander and Iglovikov, Vladimir I. and Khvedchenya, Eugene and Parinov, Alex and Druzhinin, Mikhail and Kalinin, Alexandr A.},
TITLE = {Albumentations: Fast and Flexible Image Augmentations},
JOURNAL = {Information},
VOLUME = {11},
YEAR = {2020},
NUMBER = {2},
ARTICLE-NUMBER = {125},
ISSN = {2078-2489},
DOI = {10.3390/info11020125}
}
@article{lidar_classification,
author = {Diab, Ahmed and Kashef, Rasha and Shaker, Ahmed},
title = {Deep Learning for LiDAR Point Cloud Classification in Remote Sensing},
journal = {Sensors},
volume = {22},
number = {20},
pages = {7868},
year = {2022},
month = {10},
doi = {10.3390/s22207868},
pmid = {36298220},
pmcid = {PMC9609839}
}
@article{lidar_rgb_wst,
title = {Individual tree segmentation and tree species classification in subtropical broadleaf forests using UAV-based LiDAR, hyperspectral, and ultrahigh-resolution RGB data},
journal = {Remote Sensing of Environment},
volume = {280},
pages = {113143},
year = {2022},
issn = {0034-4257},
doi = {10.1016/j.rse.2022.113143},
author = {Haiming Qin and Weiqi Zhou and Yang Yao and Weimin Wang},
keywords = {Hyperspectral data, LiDAR data, Ultrahigh-resolution RGB imagery, Individual tree segmentation, Tree species classification, Subtropical broadleaf forests},
abstract = {Accurate classification of individual tree species is essential for inventorying, managing, and protecting forest resources. Individual tree species classification in subtropical forests remains challenging as existing individual tree segmentation algorithms typically result in over-segmentation in subtropical broadleaf forests, in which tree crowns often have multiple peaks. In this study, we proposed a watershed-spectral-texture-controlled normalized cut (WST-Ncut) algorithm, and applied it to delineate individual trees in a subtropical broadleaf forest situated in Shenzhen City of southern China (114°23′28″E, 22°43′50″N). Using this algorithm, we first obtained accurate crown boundary of individual broadleaf trees. We then extracted different suites of vertical structural, spectral, and textural features from UAV-based LiDAR, hyperspectral, and ultrahigh-resolution RGB data, and used these features as inputs to a random forest classifier to classify 18 tree species. The results showed that the proposed WST-Ncut algorithm could reduce the over-segmentation of the watershed segmentation algorithm, and thereby was effective for delineating individual trees in subtropical broadleaf forests (Recall = 0.95, Precision = 0.86, and F-score = 0.91). Combining the structural, spectral, and textural features of individual trees provided the best tree species classification results, with overall accuracy reaching 91.8\%, which was 10.2\%, 13.6\%, and 19.0\% higher than that of using spectral, structural, and textural features alone, respectively. In addition, results showed that better individual tree segmentation would lead to higher accuracy of tree species classification, but the increase of the number of tree species would result in the decline of classification accuracy.}
}
@Article{lidar_rgb_acnet,
AUTHOR = {Li, Yingbo and Chai, Guoqi and Wang, Yueting and Lei, Lingting and Zhang, Xiaoli},
TITLE = {ACE R-CNN: An Attention Complementary and Edge Detection-Based Instance Segmentation Algorithm for Individual Tree Species Identification Using UAV RGB Images and LiDAR Data},
JOURNAL = {Remote Sensing},
VOLUME = {14},
YEAR = {2022},
NUMBER = {13},
ARTICLE-NUMBER = {3035},
ISSN = {2072-4292},
ABSTRACT = {Accurate and automatic identification of tree species information at the individual tree scale is of great significance for fine-scale investigation and management of forest resources and scientific assessment of forest ecosystems. Despite the fact that numerous studies have been conducted on the delineation of individual tree crown and species classification using drone high-resolution red, green and blue (RGB) images, and Light Detection and Ranging (LiDAR) data, performing the above tasks simultaneously has rarely been explored, especially in complex forest environments. In this study, we improve upon the state of the Mask region-based convolution neural network (Mask R-CNN) with our proposed attention complementary network (ACNet) and edge detection R-CNN (ACE R-CNN) for individual tree species identification in high-density and complex forest environments. First, we propose ACNet as the feature extraction backbone network to fuse the weighted features extracted from RGB images and canopy height model (CHM) data through an attention complementary module, which is able to selectively fuse weighted features extracted from RGB and CHM data at different scales, and enables the network to focus on more effective information. Second, edge loss is added to the loss function to improve the edge accuracy of the segmentation, which is calculated through the edge detection filter introduced in the Mask branch of Mask R-CNN. We demonstrate the performance of ACE R-CNN for individual tree species identification in three experimental areas of different tree species in southern China with precision (P), recall (R), F1-score, and average precision (AP) above 0.9. Our proposed ACNet–the backbone network for feature extraction–has better performance in individual tree species identification compared with the ResNet50-FPN (feature pyramid network). The addition of the edge loss obtained by the Sobel filter further improves the identification accuracy of individual tree species and accelerates the convergence speed of the model training. This work demonstrates the improved performance of ACE R-CNN for individual tree species identification and provides a new solution for tree-level species identification in complex forest environments, which can support carbon stock estimation and biodiversity assessment.},
DOI = {10.3390/rs14133035}
}
@ARTICLE{watershed,
author={Vincent, L. and Soille, P.},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
title={Watersheds in digital spaces: an efficient algorithm based on immersion simulations},
year={1991},
volume={13},
number={6},
pages={583--598},
keywords={Morphology;Oceans;Computational modeling;Gray-scale;Image segmentation;Digital elevation models;Surfaces;Image processing;Floods;Digital images},
doi={10.1109/34.87344}
}
@article{lidar_watershed,
author = {Doo-Ahn Kwak and Woo-Kyun Lee and Jun-Hak Lee and Greg S. Biging and Peng Gong},
title = {Detection of individual trees and estimation of tree height using LiDAR data},
journal = {Journal of Forest Research},
year = {2007},
volume = {12},
number = {6},
pages = {425--434},
doi = {10.1007/s10310-007-0041-9},
issn = {1610-7403},
abstract = {For estimation of tree parameters at the single-tree level using light detection and ranging (LiDAR), detection and delineation of individual trees is an important starting point. This paper presents an approach for delineating individual trees and estimating tree heights using LiDAR in coniferous (Pinus koraiensis, Larix leptolepis) and deciduous (Quercus spp.) forests in South Korea. To detect tree tops, the extended maxima transformation of morphological image-analysis methods was applied to the digital canopy model (DCM). In order to monitor spurious local maxima in the DCM, which cause false tree tops, different h values in the extended maxima transformation were explored. For delineation of individual trees, watershed segmentation was applied to the distance-transformed image from the detected tree tops. The tree heights were extracted using the maximum value within the segmented crown boundary. Thereafter, individual tree data estimated by LiDAR were compared to the field measurement data under five categories (correct delineation, satisfied delineation, merged tree, split tree, and not found). In our study, P. koraiensis, L. leptolepis, and Quercus spp. had the best detection accuracies of 68.1\% at h = 0.18, 86.7\% at h = 0.12, and 67.4\% at h = 0.02, respectively. The coefficients of determination for tree height estimation were 0.77, 0.80, and 0.74 for P. koraiensis, L. leptolepis, and Quercus spp., respectively.}
}
@incollection{rgb_analytical,
author = {Marilia Ferreira Gomes and Philippe Maillard},
title = {Detection of Tree Crowns in Very High Spatial Resolution Images},
booktitle = {Environmental Applications of Remote Sensing},
publisher = {IntechOpen},
address = {Rijeka},
year = {2016},
editor = {Maged Marghany},
chapter = {2},
doi = {10.5772/62122},
}
@article{local-maximum,
title = {Local Maximum Filtering for the Extraction of Tree Locations and Basal Area from High Spatial Resolution Imagery},
journal = {Remote Sensing of Environment},
volume = {73},
number = {1},
pages = {103--114},
year = {2000},
issn = {0034-4257},
doi = {10.1016/S0034-4257(00)00101-2},
author = {Mike Wulder and K.Olaf Niemann and David G. Goodenough},
abstract = {In this study we investigate the use of local maximum (LM) filtering to locate trees on high spatial resolution (1-m) imagery. Results are considered in terms of commission error (falsely indicated trees) and omission error (missed trees). Tree isolation accuracy is also considered as a function of tree crown size. The success of LM filtering in locating trees depends on the size and distribution of trees in relation to the image spatial resolution. A static-sized 3×3 pixel LM filter provides an indication of the maximum number of trees that may be found in the imagery, yet high errors of commission reduce the integrity of the results. Variable window-size techniques may be applied to reduce both the errors of commission and omission, especially for larger trees. The distribution of the error by tree size is important since the large trees account for a greater proportion of the stand basal area than the smaller trees. An investigation of the success of tree identification by tree crown radius demonstrates the relationship between image spatial resolution and LM filtering success. At an image spatial resolution of 1 m, a tree crown radius of 1.5 m appears to be the minimum size for reliable identification of tree locations using LM filtering.}
}
@inproceedings{valley-following,
title={Automatic individual tree crown delineation using a valley-following algorithm and rule-based system},
author={Gougeon, Fran{\c{c}}ois A. and others},
booktitle={Proc. International Forum on Automated Interpretation of High Spatial Resolution Digital Imagery for Forestry, Victoria, British Columbia, Canada},
pages={11--23},
year={1998},
url={https://d1ied5g1xfgpx8.cloudfront.net/pdfs/4583.pdf}
}
@phdthesis{template-matching,
author = {Pollock, Richard James},
advisor = {Woodham, Robert J.},
title = {The automatic recognition of individual trees in aerial images of forests based on a synthetic tree crown image model},
year = {1996},
isbn = {0612148157},
publisher = {The University of British Columbia (Canada)},
abstract = {The thesis of this work is that individual tree crowns can be automatically recognized in monocular high spatial resolution optical images of scenes containing boreal or cool temperate forests in a leaved state. The thesis was advanced by developing and testing an automatic tree crown recognition procedure that is based on a model of the image formation process at the scale of an individual tree. This model provides a means of applying specific scene and image formation knowledge to the recognition task. The procedure associates instances of a three-dimensional shape description with locations in a scene image such that the descriptions estimate the visible scene extent of tree crowns that existed at the corresponding scene locations when the image was acquired. This provides an estimate of the average horizontal diameter of the vertical projection of individual recognized tree crowns, and a basis for species classification. This work makes a contribution to the overall effort to increase the level of automation in forest type mapping. This work also introduces and demonstrates the use of a pre-defined image model to support the manual acquisition of a sample of unmodelled tree crown image properties, and the use of constraints related to the spatial relationships among multiple neighbouring candidate recognition instances to resolve image interpretation conflicts. The procedure was tested with a scene of mixed uneven-aged forests in which the trees represent a wide variety of species, size, and growing conditions. The results were assessed on the basis of ground reference data and compared to those produced by human interpreters. The scene represented a greater level of difficulty than that which has been addressed by previous attempts at automating the tree crown recognition task. The test results show that the procedure was able to largely accommodate the variation represented by the test scene, but that human interpreters were better able to accommodate irregularities in tree crown form and irradiance that were caused by tight vertical and horizontal spacing of the crowns.},
note = {AAINN14815},
url = {https://dx.doi.org/10.14288/1.0051597}
}
@INPROCEEDINGS{yolo,
author={Redmon, Joseph and Divvala, Santosh and Girshick, Ross and Farhadi, Ali},
booktitle={2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
title={You Only Look Once: Unified, Real-Time Object Detection},
year={2016},
pages={779-788},
keywords={Computer architecture;Microprocessors;Object detection;Training;Real-time systems;Neural networks;Pipelines},
doi={10.1109/CVPR.2016.91}
}
@article{HAO2021112,
title = {Automated tree-crown and height detection in a young forest plantation using mask region-based convolutional neural network (Mask R-CNN)},
journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
volume = {178},
pages = {112--123},
year = {2021},
issn = {0924-2716},
doi = {10.1016/j.isprsjprs.2021.06.003},
author = {Zhenbang Hao and Lili Lin and Christopher J. Post and Elena A. Mikhailova and Minghui Li and Yan Chen and Kunyong Yu and Jian Liu},
keywords = {Deep learning, Instance segmentation, Tree-crown delineation, Tree height, UAV imagery, Plantation forest},
abstract = {Tree-crown and height are primary tree measurements in forest inventory. Convolutional neural networks (CNNs) are a class of neural networks, which can be used in forest inventory; however, no prior studies have developed a CNN model to detect tree crown and height simultaneously. This study is the first-of-its-kind that explored training a mask region-based convolutional neural network (Mask R-CNN) for automatically and concurrently detecting discontinuous tree crown and height of Chinese fir (Cunninghamia lanceolata (Lamb) Hook) in a plantation. A DJI Phantom4-Multispectral Unmanned Aerial Vehicle (UAV) was used to obtain high-resolution images of the study site, Shunchang County, China. Tree crown and height of Chinese fir was manually delineated and derived from this UAV imagery. A portion of the ground-truthed tree height values were used as a test set, and the remaining measurements were used as the model training data. Six different band combinations and derivations of the UAV imagery were used to detect tree crown and height, respectively (Multi band-DSM, RGB-DSM, NDVI-DSM, Multi band-CHM, RGB-CHM, and NDVI-CHM combination). The Mask R-CNN model with the NDVI-CHM combination achieved superior performance. The accuracy of Chinese fir’s individual tree-crown detection was considerable (F1 score = 84.68\%), the Intersection over Union (IoU) of tree crown delineation was 91.27\%, and tree height estimates were highly correlated with the height from UAV imagery (R2 = 0.97, RMSE = 0.11 m, rRMSE = 4.35\%) and field measurement (R2 = 0.87, RMSE = 0.24 m, rRMSE = 9.67\%). Results demonstrate that the input image with an CHM achieves higher accuracy of tree crown delineation and tree height assessment compared to an image with a DSM. The accuracy and efficiency of Mask R-CNN has a great potential to assist the application of remote sensing in forests.}
}
@article{VENTURA2024103848,
title = {Individual tree detection in large-scale urban environments using high-resolution multispectral imagery},
journal = {International Journal of Applied Earth Observation and Geoinformation},
volume = {130},
pages = {103848},
year = {2024},
issn = {1569-8432},
doi = {10.1016/j.jag.2024.103848},
url = {https://www.sciencedirect.com/science/article/pii/S1569843224002024},
author = {Jonathan Ventura and Camille Pawlak and Milo Honsberger and Cameron Gonsalves and Julian Rice and Natalie L.R. Love and Skyler Han and Viet Nguyen and Keilana Sugano and Jacqueline Doremus and G. Andrew Fricker and Jenn Yost and Matt Ritter},
keywords = {Tree detection, Urban forests, Multispectral imagery, Aerial imagery, Computer vision, Object detection, Deep learning, Convolutional neural network},
abstract = {Systematic maps of urban forests are useful for regional planners and ecologists to understand the spatial distribution of trees in cities. However, manually-created urban forest inventories are expensive and time-consuming to create and typically do not provide coverage of private land. Toward the goal of automating urban forest inventory through machine learning techniques, we performed a comparative study of methods for automatically detecting and localizing trees in multispectral aerial imagery of urban environments, and introduce a novel method based on convolutional neural network regression. Our evaluation is supported by a new dataset of over 1,500 images and almost 100,000 tree annotations, covering eight cities, six climate zones, and three image capture years. Our method outperforms previous methods, achieving 73.6\% precision and 73.3\% recall when trained and tested in Southern California, and 76.5\% precision and 72.0\% recall when trained and tested across the entire state. To demonstrate the scalability of the technique, we produced the first map of trees across the entire urban forest of California. The map we produced provides important data for the planning and management of California’s urban forest, and establishes a proven methodology for potentially producing similar maps nationally and globally in the future.}
}
@Manual{QGIS_software,
title = {QGIS Geographic Information System},
author = {{QGIS Development Team}},
organization = {QGIS Association},
url = {https://www.qgis.org},
}
@misc{AHN_point_cloud_viewer,
author = {{Actueel Hoogtebestand Nederland}},
title = {{AHN-puntenwolkenviewer (AHN point cloud viewer)}},
url = {https://www.ahn.nl/ahn-puntenwolkenviewer},
note = {Accessed: 2024-07-31}
}
@misc{google_street_view,
author = {Google},
title = {Google Street View},
year = {2024},
url = {https://www.google.com/streetview/},
note = {Accessed: 2024-07-31}
}