@misc{mitrou,
author = {Mitrou, Lilian},
year = {2019},
month = {04},
title = {Data Protection, Artificial Intelligence and Cognitive Services: Is the General Data Protection Regulation (GDPR) ‘Artificial Intelligence-Proof’?}
}
@techreport{dinant,
title = {Application of Convention 108 to the profiling mechanism: some ideas for the future work of the consultative committee (T-PD): final version},
author = {Jean-Marc Dinant and Nathalie Lefever and Christophe Lazaro and Yves Poullet and Antoinette Rouvroy},
institution = {Council of Europe},
year = {2008},
url = {https://api.semanticscholar.org/CorpusID:115046048}
}
@techreport{infocomm2022,
author = {{Information Commissioner}},
title = {Information Commissioner’s Annual Report and Financial Statements 2022/23},
year = {2023},
month = {July},
institution = {Information Commissioner's Office},
number = {HC 1440},
pages = {12}
}
@misc{kamarinou2016,
title = {Machine Learning with Personal Data},
author = {Dimitra Kamarinou and Christopher Millard and Jatinder Singh},
howpublished = {Queen Mary School of Law Legal Studies Research Paper},
year = {2016},
url = {https://api.semanticscholar.org/CorpusID:63785275}
}
@book{hildebrandt2008,
title = {Profiling the European Citizen: Cross-Disciplinary Perspectives},
editor = {Mireille Hildebrandt and Serge Gutwirth},
publisher = {Springer},
year = {2008},
url = {https://api.semanticscholar.org/CorpusID:44785133}
}
@inproceedings{andrade2010,
title={Data Protection, Privacy and Identity: Distinguishing Concepts and Articulating Rights},
author={Norberto Nuno Gomes de Andrade},
booktitle={PrimeLife},
year={2010},
url={https://api.semanticscholar.org/CorpusID:17381091}
}
@article{conrad2017,
author = {Conrad, Conrad Sebastian},
year = {2017},
month = {12},
pages = {740--744},
title = {Künstliche Intelligenz – Die Risiken für den Datenschutz},
volume = {41},
journal = {Datenschutz und Datensicherheit - DuD},
doi = {10.1007/s11623-017-0870-4}
}
@book{cmrec2020,
title = {{Recommendation CM/Rec(2020)1 of the Committee of Ministers to member States on the human rights impacts of algorithmic systems}},
author = {{Committee of Ministers}},
year = {2020},
month = {April},
publisher = {Council of Europe},
note = {Adopted by the Committee of Ministers on 8 April 2020 at the 1373rd meeting of the Ministers’ Deputies}
}
@book{wagner2018,
title = "Algorithms and Human Rights: Study on the human rights dimensions of automated data processing techniques and possible regulatory implications",
author = "Ben Wagner and Wolfgang Schulz and Karmen Turk and {de la Chapelle}, {Bertrand de la} and Julia H{\"o}rnle and Tanja Kersevan-Smokvina and Matthias Kettemann and D{\"o}rte Nieland and Arseny Nedyak and Pēteris Podvinskis and Thomas Schneider and Sophie Stalla-Bourdillon and Dirk Voorhoof",
year = "2018",
language = "English",
publisher = "Council of Europe",
}
@online{compas_analysis,
author = {Jeff Larson and Surya Mattu and Lauren Kirchner and Julia Angwin},
title = {How We Analyzed the COMPAS Recidivism Algorithm},
organization = {ProPublica},
year = {2016},
month = {May},
day = {23},
url = {https://www.propublica.org/article/how-we-analyzed-the-compas-recidivism-algorithm}
}
@inbook{gillespie2014,
author = {Gillespie, Tarleton},
year = {2014},
title = {The Relevance of Algorithms},
booktitle = {Media Technologies: Essays on Communication, Materiality, and Society},
publisher = {MIT Press},
isbn = {9780262525374},
doi = {10.7551/mitpress/9780262525374.003.0009}
}
@article{MANTELERO2018754,
title = {AI and Big Data: A blueprint for a human rights, social and ethical impact assessment},
journal = {Computer Law \& Security Review},
volume = {34},
number = {4},
pages = {754--772},
year = {2018},
issn = {0267-3649},
doi = {10.1016/j.clsr.2018.05.017},
url = {https://www.sciencedirect.com/science/article/pii/S0267364918302012},
author = {Alessandro Mantelero},
keywords = {Data protection, Impact assessment, Data protection impact assessment, Human rights, Human rights impact assessment, Ethical impact assessment, Social impact assessment, General Data Protection Regulation},
abstract = {The use of algorithms in modern data processing techniques, as well as data-intensive technological trends, suggests the adoption of a broader view of the data protection impact assessment. This will force data controllers to go beyond the traditional focus on data quality and security, and consider the impact of data processing on fundamental rights and collective social and ethical values. Building on studies of the collective dimension of data protection, this article sets out to embed this new perspective in an assessment model centred on human rights (Human Rights, Ethical and Social Impact Assessment-HRESIA). This self-assessment model intends to overcome the limitations of the existing assessment models, which are either too closely focused on data processing or have an extent and granularity that make them too complicated to evaluate the consequences of a given use of data. In terms of architecture, the HRESIA has two main elements: a self-assessment questionnaire and an ad hoc expert committee. As a blueprint, this contribution focuses mainly on the nature of the proposed model, its architecture and its challenges; a more detailed description of the model and the content of the questionnaire will be discussed in a future publication drawing on the ongoing research.}
}
@article{nemitz2018,
title={Constitutional democracy and technology in the age of artificial intelligence},
author={Paul Nemitz},
journal={Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences},
year={2018},
volume={376},
url={https://api.semanticscholar.org/CorpusID:53501707}
}
@article{moon2023,
title = {Comparison of personal information de-identification policies and laws within the EU, the US, Japan, and South Korea},
journal = {Government Information Quarterly},
volume = {40},
number = {2},
pages = {101805},
year = {2023},
issn = {0740-624X},
doi = {10.1016/j.giq.2023.101805},
url = {https://www.sciencedirect.com/science/article/pii/S0740624X23000059},
author = {Moon-Ho Joo and Hun-Yeong Kwon},
keywords = {Personal information, de-identification, Anonymization, Pseudonymization, personal information protection act},
abstract = {In the era of big data, data creates new added value by collecting, analyzing, and transforming the thoughts and actions of economic members and creating insights that can predict the future. This means that in the near future, data-driven decisions based on data, rather than subjectivity or experience, will be the driving force behind society. However, since valuable and useful data is bound to be generated from personal information, how to safely utilize personal information is becoming an important topic in the big data era. To protect personal information in this environment, data services and database providers have increased their focus on the implementation of de-identification, a technique that can protect personal information while maintaining the usefulness of the data. Moreover, many countries have introduced new policies and laws focusing on the de-identification of personal information. Accordingly, this paper compares and analyzes how the European Union, the United States, Japan, and South Korea have recently adopted the concept of de-identification in their own personal information protection laws, and presents common trends and implications. As a comparative framework, each country's conceptual classification system related to de-identification, legal treatment, data controller obligations, and de-identification procedures was included. This study identifies the shifts made in each country's regulatory system following the introduction of the concept of de-identification. These include a shift from a binary approach to an approach that considers the identifiability spectrum, from a belief in anonymization to regulation from a risk management perspective, and from a focus on de-identification methods, to responsibility for follow-up management. This study contributes to the establishment of specialized knowledge of de-identification practices by empirically examining the current status of de-identification information-related legal systems adopted by major countries/regions. Also, the study proved the actualization of theory by confirming that the de-identification policy approach from the perspective of risk management is actually applied to the laws of each country. In addition, the attempt to present a framework for systematic comparison of de-identification systems by country provides a new perspective that can trace the trend of future de-identification system changes on a consistent basis. In addition, this study brings the gradual expansion of data policy research by expanding the research on de-identified information, which has been studied mainly in Europe and the United States, to case studies in Japan and South Korea.}
}
@article{pasquale2016,
title={The Black Box Society: The Secret Algorithms that Control Money and Information, by Frank Pasquale. Cambridge: Harvard University Press, 2015. 320 pp. ISBN 978-0674368279},
author={Alan Rubel},
journal={Business Ethics Quarterly},
year={2016},
volume={26},
pages={568--571},
url={https://api.semanticscholar.org/CorpusID:151540271}
}
@article{zuiderveen2016,
title={Should We Worry About Filter Bubbles?},
author={Frederik J. Zuiderveen Borgesius and Damian Trilling and Judith Moeller and Bal{\'a}zs Bod{\'o} and Claes H. de Vreese and Natali Helberger},
journal={Internet Policy Review},
volume={5},
number={1},
year={2016},
url={https://api.semanticscholar.org/CorpusID:52211897}
}
@article{bond2012,
author = {Bond, Robert and Fariss, Christopher and Jones, Jason and others},
title = {A 61-million-person experiment in social influence and political mobilization},
journal = {Nature},
volume = {489},
pages = {295--298},
year = {2012},
doi = {10.1038/nature11421},
url = {https://doi.org/10.1038/nature11421}
}
@article{zittrain2014,
author = {Zittrain, Jonathan L.},
title = {Engineering an Election},
journal = {Harvard Law Review Forum},
volume = {127},
pages = {335},
year = {2014},
note = {Harvard Public Law Working Paper No. 14-28}
}
@online{deepfake2022,
author = {Wakefield, Jane},
title = {Deepfake presidents used in Russia-Ukraine war},
organization = {BBC News},
year = {2022},
month = {March},
day = {18},
url = {https://www.bbc.com/news/technology-60780142},
urldate = {2024-03-29}
}
@online{deepfake2024,
author = {Abo Marq, Wesam},
title = {Deepfake Video of Ukraine's National Security Secretary Alleges Ukraine Involvement in Moscow Attack},
organization = {Misbar},
year = {2024},
month = {March},
day = {25},
url = {https://misbar.com/en/factcheck/2024/03/25/deepfake-video-of-ukraines-national-security-secretary-alleges-ukraine-involvement-in-moscow-attack},
urldate = {2024-03-29}
}
@article{ExplainabilityWhy,
author = {Preece, Alun},
title = {Asking ‘Why’ in AI: Explainability of intelligent systems – perspectives and challenges},
journal = {Intelligent Systems in Accounting, Finance and Management},
volume = {25},
number = {2},
pages = {63--72},
keywords = {artificial intelligence, explainability, interpretability, machine learning},
doi = {10.1002/isaf.1422},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/isaf.1422},
eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1002/isaf.1422},
abstract = {Summary Recent rapid progress in machine learning (ML), particularly so-called ‘deep learning’, has led to a resurgence in interest in explainability of artificial intelligence (AI) systems, reviving an area of research dating back to the 1970s. The aim of this article is to view current issues concerning ML-based AI systems from the perspective of classical AI, showing that the fundamental problems are far from new, and arguing that elements of that earlier work offer routes to making progress towards explainable AI today.},
year = {2018}
}
@article{pawelec2022,
title={Deepfakes and democracy (theory): How synthetic audio-visual media for disinformation and hate speech threaten core democratic functions},
author={Pawelec, Maria},
journal={Digital Society},
volume={1},
number={2},
pages={19},
year={2022},
publisher={Springer}
}
@article{TSAKALAKIS2021105527,
title = {The dual function of explanations: Why it is useful to compute explanations},
journal = {Computer Law \& Security Review},
volume = {41},
pages = {105527},
year = {2021},
issn = {0267-3649},
doi = {10.1016/j.clsr.2020.105527},
url = {https://www.sciencedirect.com/science/article/pii/S0267364920301321},
author = {Niko Tsakalakis and Sophie Stalla-Bourdillon and Laura Carmichael and Trung Dong Huynh and Luc Moreau and Ayah Helal},
keywords = {Automated decisions, Artificial intelligence, Explainability, Explainable AI, GDPR},
abstract = {Whilst the legal debate concerning automated decision-making has been focused mainly on whether a ‘right to explanation’ exists in the GDPR, the emergence of ‘explainable Artificial Intelligence’ (XAI) has produced taxonomies for the explanation of Artificial Intelligence (AI) systems. However, various researchers have warned that transparency of the algorithmic processes in itself is not enough. Better and easier tools for the assessment and review of the socio-technical systems that incorporate automated decision-making are needed. The PLEAD project suggests that, aside from fulfilling the obligations set forth by Article 22 of the GDPR, explanations can also assist towards a holistic compliance strategy if used as detective controls. PLEAD aims to show that computable explanations can facilitate monitoring and auditing, and make compliance more systematic. Automated computable explanations can be key controls in fulfilling accountability and data-protection-by-design obligations, able to empower both controllers and data subjects. This opinion piece presents the work undertaken by the PLEAD project towards facilitating the generation of computable explanations. PLEAD leverages provenance-based technology to compute explanations as external detective controls to the benefit of data subjects and as internal detective controls to the benefit of the data controller.}
}
@techreport{EUROPEAN_PARLIAMENT_GDPR_ON_AI,
author = "{European Parliamentary Research Service}",
title = "{The Impact of the General Data Protection Regulation (GDPR) on Artificial Intelligence}",
institution = "European Parliament",
year = "2020",
type = "Study",
note = "{PE 641.530, QA-QA-02-20-399-EN-N}",
isbn = "978-92-846-6771-0",
doi = "10.2861/293",
url = "http://www.europarl.europa.eu/stoa/"
} % THIS IS HANDMADE
@techreport{bertrand2003,
title = "Are Emily and Greg More Employable than Lakisha and Jamal? A Field Experiment on Labor Market Discrimination",
author = "Bertrand, Marianne and Mullainathan, Sendhil",
institution = "National Bureau of Economic Research",
type = "Working Paper",
series = "Working Paper Series",
number = "9873",
year = "2003",
month = "July",
doi = {10.3386/w9873},
URL = "http://www.nber.org/papers/w9873",
abstract = {We perform a field experiment to measure racial discrimination in the labor market. We respond with fictitious resumes to help-wanted ads in Boston and Chicago newspapers. To manipulate perception of race, each resume is assigned either a very African American sounding name or a very White sounding name. The results show significant discrimination against African-American names: White names receive 50 percent more callbacks for interviews. We also find that race affects the benefits of a better resume. For White names, a higher quality resume elicits 30 percent more callbacks whereas for African Americans, it elicits a far smaller increase. Applicants living in better neighborhoods receive more callbacks but, interestingly, this effect does not differ by race. The amount of discrimination is uniform across occupations and industries. Federal contractors and employers who list Equal Opportunity Employer' in their ad discriminate as much as other employers. We find little evidence that our results are driven by employers inferring something other than race, such as social class, from the names. These results suggest that racial discrimination is still a prominent feature of the labor market.},
}
@incollection{ALTONJI19993143,
title = {Race and Gender in the Labor Market},
chapter = {48},
booktitle = {Handbook of Labor Economics},
editor = {Orley C. Ashenfelter and David Card},
publisher = {Elsevier},
volume = {3},
pages = {3143--3259},
year = {1999},
issn = {1573-4463},
doi = {10.1016/S1573-4463(99)30039-0},
url = {https://www.sciencedirect.com/science/article/pii/S1573446399300390},
author = {Joseph G. Altonji and Rebecca M. Blank},
abstract = {This chapter summarizes recent research in economics that investigates differentials by race and gender in the labor market. We start with a statistical overview of the trends in labor market outcomes by race, gender and Hispanic origin, including some simple regressions on the determinants of wages and employment. This is followed in Section 3 by an extended review of current theories about discrimination in the labor market, including recent extensions of taste-based theories, theories of occupational exclusion, and theories of statistical discrimination. Section 4 discusses empirical research that provides direct evidence of discrimination in the labor market, beyond “unexplained gaps” in wage or employment regressions. The remainder of the chapter reviews the evidence on race and gender gaps, particularly wage gaps. Section 5 reviews research on the impact of pre-market human capital differences in education and family background that differ by race and gender. Section 6 reviews the impact of differences in both the levels and the returns to experience and seniority, with discussion of the role of training and labor market search and turnover on race and gender differentials. Section 7 reviews the role of job characteristics (particularly occupational characteristics) in the gender wage gap. Section 8 reviews the smaller literature on differences in fringe benefits by gender. Section 9 is an extensive discussion of the empirical work that accounts for changes in the trends in race and gender differentials over time. Of particular interest is the new research literature that investigates the impact of widening wage inequality on race and gender wage gaps. Section 10 reviews research that relates policy changes to race and gender differentials, including anti-discrimination policy. The chapter concludes with comments about a future research agenda.}
}
@inproceedings{Goldin1999OrchestratingI,
title={Orchestrating Impartiality: The Impact of “Blind” Auditions on Female Musicians},
author={Claudia Goldin and Cecilia Elena Rouse},
year={1999},
url={https://api.semanticscholar.org/CorpusID:261258493}
}
@incollection{Voorhoof2013TheRT,
title={The right to freedom of expression in the workplace under Article 10 ECHR},
author={Dirk Voorhoof and Patrick Humblet},
editor={Filip Dorssemont and Klaus L{\"o}rcher and Isabelle Sch{\"o}mann},
booktitle={The European Convention on Human Rights and the Employment Relation},
publisher={Hart Publishing},
year={2013},
url={https://api.semanticscholar.org/CorpusID:155829806}
}
@misc{vanhaastert2016government,
author = {van Haastert, Hugo},
title = {Government as a Platform: Public Values in the Age of Big Data},
howpublished = {Paper for the OII IPP 2016 Conference},
year = {2016}
}
@misc{buttarelli2016privacy,
author = {Buttarelli, Giovanni},
title = {Privacy in an age of hyperconnectivity},
howpublished = {Keynote speech at the Privacy and Security Conference 2016, Rust am Neusiedler See, Austria},
year = {2016},
month = {November},
day = {7}
}
@report{coe2018report,
title = {Council of Europe Consultative Committee Report on Artificial Intelligence and Data Protection},
institution = {Council of Europe},
author={{CoE Consultative Committee}},
address = {Strasbourg},
year = {2018},
month = {September},
day = {17}
}
@online{tay,
author = {Kraft, Amy},
title = {Microsoft shuts down AI chatbot after it turned into a Nazi},
organization = {CBS News},
year = {2016},
month = {March},
day = {25},
url = {https://www.cbsnews.com/news/microsoft-shuts-down-ai-chatbot-after-it-turns-into-a-nazi/},
urldate = {2024-03-29}
}
@article{Schaar2010,
author = {Schaar, Peter},
title = {Privacy by Design},
journal = {Identity in the Information Society},
volume = {3},
number = {2},
pages = {267--274},
year = {2010},
doi = {10.1007/s12394-010-0055-x},
url = {https://doi.org/10.1007/s12394-010-0055-x},
abstract = {In view of rapid and dramatic technological change, it is important to take the special requirements of privacy protection into account early on, because new technological systems often contain hidden dangers which are very difficult to overcome after the basic design has been worked out. So it makes all the more sense to identify and examine possible data protection problems when designing new technology and to incorporate privacy protection into the overall design, instead of having to come up with laborious and time-consuming “patches” later on. This approach is known as “Privacy by Design” (PbD).},
issn = {1876-0678}
}
@misc{främling2020explainable,
title={Explainable AI without Interpretable Model},
author={Kary Främling},
year={2020},
eprint={2009.13996},
archivePrefix={arXiv},
url={https://doi.org/10.48550/arXiv.2009.13996},
primaryClass={cs.AI}
}
@techreport{HouseOfCommons2016RoboticsAI,
title = {{Robotics and Artificial Intelligence: Fifth Report of Session 2016–17}},
author = {{House of Commons Science and Technology Committee}},
year = 2016,
institution = {House of Commons},
type = {Report},
number = {HC 145},
address = {London},
month = 10,
url = {https://publications.parliament.uk/pa/cm201617/cmselect/cmsctech/145/145.pdf},
}