@misc{noauthor_mpi_nodate,
title = {{MPI} {Sintel} {Dataset}},
url = {http://sintel.is.tue.mpg.de/},
urldate = {2017-01-11}
}
@misc{noauthor_deep_nodate,
title = {Deep {Discrete} {Flow} {\textbar} {Perceiving} {Systems} - {Max} {Planck} {Institute} for {Intelligent} {Systems}},
url = {https://ps.is.tuebingen.mpg.de/publications/guney2016accv},
urldate = {2017-01-11}
}
@misc{noauthor_determining_nodate,
title = {Determining the {Ego}-{Motion} of an {Uncalibrated} {Camera} from {Instantaneous} {Optical} {Flow}},
url = {https://www.scribd.com/document/32924542/DETERMINING-THE-EGO-MOTION-OF-AN-UNCALIBRATED-CAMERA-FROM-INSTANTANEOUS-OPTICAL-FLOW},
urldate = {2017-02-17}
}
@inproceedings{tian_comparison_1996,
address = {Washington, DC, USA},
series = {{CVPR} '96},
title = {Comparison of {Approaches} to {Egomotion} {Computation}},
isbn = {978-0-8186-7258-3},
url = {http://dl.acm.org/citation.cfm?id=794190.794594},
urldate = {2017-02-17},
booktitle = {Proceedings of the 1996 {Conference} on {Computer} {Vision} and {Pattern} {Recognition} ({CVPR} '96)},
publisher = {IEEE Computer Society},
author = {Tian, Tina Y. and Tomasi, Carlo and Heeger, David J.},
year = {1996},
pages = {315--}
}
@inproceedings{azuma_egomotion_2010,
title = {Egomotion estimation using planar and non-planar constraints},
doi = {10.1109/IVS.2010.5548117},
abstract = {There are two major approaches for estimating camera motion (egomotion) given an image sequence. Each approach has its own strengths and weaknesses. One approach is the feature-based methods. In this approach the point feature correspondences are taken as the input. Since initially the depths of point features are unknown, the egomotion is estimated by the depth independent epipolar constraints on the point feature correspondences. This approach is robust in practice, but is relatively limited in accuracy since it exploits no structure assumption, such as planarity. The other approach, termed the direct method, has the advantage in its accuracy. In this method, the egomotion is estimated as the parameters of a homography by directly aligning the planar portion of two images. The direct method may be preferable in the cases with known planes that are persistent in the view. The on-board camera system for ground vehicles is a representative example. Despite the potential accuracy, the direct method fails when the plane lacks proper texture. We propose an egomotion estimation method that is based on both the homographic constraint on a planar region, and on the epipolar constraint on generally non-planar regions, so that both kinds of visual cues contribute to the estimation. We observe that the method improves the egomotion estimation in robustness while retaining the comparable accuracy to the direct method.},
booktitle = {2010 {IEEE} {Intelligent} {Vehicles} {Symposium}},
author = {Azuma, T. and Sugimoto, S. and Okutomi, M.},
month = jun,
year = {2010},
keywords = {camera motion estimation, depth independent epipolar constraints, egomotion estimation, ground vehicles, homographic constraint, image sensors, image sequences, Intelligent vehicles, Land vehicles, Layout, motion estimation, nonplanar constraints, on-board camera system, parameter estimation, planar constraints, Robustness, Simultaneous localization and mapping, Smart cameras, traffic engineering computing, USA Councils},
pages = {855--862}
}
@article{cesic_radar_2016,
title = {Radar and stereo vision fusion for multitarget tracking on the special {Euclidean} group},
volume = {83},
issn = {0921-8890},
url = {http://www.sciencedirect.com/science/article/pii/S0921889015303286},
doi = {10.1016/j.robot.2016.05.001},
abstract = {Reliable scene analysis, under varying conditions, is an essential task in nearly any assistance or autonomous system application, and advanced driver assistance systems (ADAS) are no exception. ADAS commonly involve adaptive cruise control, collision avoidance, lane change assistance, traffic sign recognition, and parking assistance—with the ultimate goal of producing a fully autonomous vehicle. The present paper addresses detection and tracking of moving objects within the context of ADAS. We use a multisensor setup consisting of a radar and a stereo camera mounted on top of a vehicle. We propose to model the sensors uncertainty in polar coordinates on Lie Groups and perform the objects state filtering on Lie groups, specifically, on the product of two special Euclidean groups, i.e., $SE(2)^2$. To this end, we derive the designed filter within the framework of the extended Kalman filter on Lie groups. We assert that the proposed approach results with more accurate uncertainty modeling, since used sensors exhibit contrasting measurement uncertainty characteristics and the predicted target motions result with banana-shaped uncertainty contours. We believe that accurate uncertainty modeling is an important ADAS topic, especially when safety applications are concerned. To solve the multitarget tracking problem, we use the joint integrated probabilistic data association filter and present necessary modifications in order to use it on Lie groups. The proposed approach is tested on a real-world dataset collected with the described multisensor setup in urban traffic scenarios.},
urldate = {2017-04-26},
journal = {Robotics and Autonomous Systems},
author = {Ćesić, Josip and Marković, Ivan and Cvišić, Igor and Petrović, Ivan},
month = sep,
year = {2016},
keywords = {advanced driver assistance systems, Detection and tracking of moving objects, Joint integrated probabilistic data association, Radar, Stereo camera},
pages = {338--348}
}
@inproceedings{898370,
author = {Stein, G. P. and Mano, O. and Shashua, A.},
booktitle = {Proceedings of the {IEEE} {Intelligent} {Vehicles} {Symposium} 2000 (Cat. No.00TH8511)},
title = {A robust method for computing vehicle ego-motion},
year = {2000},
pages = {362--368},
keywords = {automated highways;computer vision;image motion analysis;image sequences;matrix algebra;parameter estimation;probability;road vehicles;cluttered scenes;direct methods;glare;global probability function;moving objects;probability distribution matrices;rain;rear view mirror;robust method;vehicle ego-motion;Cameras;Computer vision;Layout;Mechanical sensors;Mirrors;Motion measurement;Rain;Road vehicles;Robustness;Testing},
doi = {10.1109/IVS.2000.898370}
}
@inproceedings{6906584,
author = {Forster, C. and Pizzoli, M. and Scaramuzza, D.},
booktitle = {2014 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
title = {{SVO}: Fast semi-direct monocular visual odometry},
year = {2014},
pages = {15--22},
keywords = {autonomous aerial vehicles;control engineering computing;distance measurement;embedded systems;motion estimation;probability;robot vision;stereo image processing;3D points;GPS-denied environments;SVO;consumer laptop;fast semidirect monocular visual odometry;high frame-rate motion estimation;micro-aerial-vehicle state-estimation;onboard embedded computer;open-source software;outlier measurements;pixel intensities;probabilistic mapping method;subpixel precision;Cameras;Feature extraction;Motion estimation;Optimization;Robustness;Three-dimensional displays;Tracking},
doi = {10.1109/ICRA.2014.6906584},
issn = {1050-4729},
month = may
}
@article{saurer:hal-01466853,
title = {{Homography Based Egomotion Estimation with a Common Direction}},
author = {Saurer, Olivier and Vasseur, Pascal and Boutteau, R{\'e}mi and Demonceaux, C{\'e}dric and Pollefeys, Marc and Fraundorfer, Friedrich},
url = {https://hal.archives-ouvertes.fr/hal-01466853},
journal = {{IEEE Transactions on Pattern Analysis and Machine Intelligence}},
publisher = {{Institute of Electrical and Electronics Engineers}},
volume = {39},
number = {2},
pages = {327--341},
year = {2017},
month = feb,
doi = {10.1109/TPAMI.2016.2545663},
keywords = {computer vision; egomotion estimation; homography estimation; structure-from-motion},
pdf = {https://hal.archives-ouvertes.fr/hal-01466853/file/Paper_Saurer.pdf},
hal_id = {hal-01466853},
hal_version = {v1}
}
@inproceedings{Cheda2012MonocularEE,
title={Monocular Egomotion Estimation based on Image Matching},
author={Diego Cheda and Daniel Ponsa and Antonio Manuel L{\'o}pez Pe{\~n}a},
booktitle={ICPRAM},
year={2012}
}
@inproceedings{zhou2017unsupervised,
author = {Zhou, Tinghui and Brown, Matthew and Snavely, Noah and Lowe, David G.},
title = {Unsupervised Learning of Depth and Ego-Motion from Video},
booktitle = {CVPR},
year = {2017}
}