/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*      core_clear_initiator_node_from_tpg():
 *
 *      Revoke all active MappedLUN access for the given node ACL before
 *      the ACL itself is released.
 */
static void core_clear_initiator_node_from_tpg(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        int i;
        struct se_dev_entry *deve;
        struct se_lun *lun;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        pr_err("%s device entry's device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }

                lun = deve->se_lun;
                spin_unlock_irq(&nacl->device_list_lock);
                core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

                spin_lock_irq(&nacl->device_list_lock);
        }
        spin_unlock_irq(&nacl->device_list_lock);
}

/*      __core_tpg_get_initiator_node_acl():
 *
 *      spin_lock_irq(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname))
                        return acl;
        }

        return NULL;
}

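/*
 * Locking sketch (illustrative, mirroring the callers later in this file):
 *
 *      spin_lock_irq(&tpg->acl_node_lock);
 *      acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 *      ...
 *      spin_unlock_irq(&tpg->acl_node_lock);
 */
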
/*      core_tpg_get_initiator_node_acl():
 *
 *      Locked lookup that only returns explicitly configured
 *      (non-dynamic) node ACLs.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        spin_lock_irq(&tpg->acl_node_lock);
        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname) &&
                    !acl->dynamic_node_acl) {
                        spin_unlock_irq(&tpg->acl_node_lock);
                        return acl;
                }
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        return NULL;
}

/*      core_tpg_add_node_to_devs():
 *
 *      Create demo-mode MappedLUNs for every active LUN in the TPG.
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg)
{
        int i = 0;
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = tpg->tpg_lun_list[i];
                if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);

                dev = lun->lun_se_dev;
                /*
                 * By default in LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is ON, or READ_ONLY;
                 */
                if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
                        if (dev->dev_flags & DF_READ_ONLY)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        /*
                         * Allow only optical drives to issue R/W in default RO
                         * demo mode.
                         */
                        if (dev->transport->get_device_type(dev) == TYPE_DISK)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }

                pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
                        "READ-WRITE" : "READ-ONLY");

                core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access, acl, tpg, 1);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}

/*      core_set_queue_depth_for_node():
 *
 *      Sanity-check the ACL's queue depth, falling back to a depth of 1.
 */
static int core_set_queue_depth_for_node(
        struct se_portal_group *tpg,
        struct se_node_acl *acl)
{
        if (!acl->queue_depth) {
                pr_err("Queue depth for %s Initiator Node: %s is 0,"
                        " defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }

        return 0;
}

void array_free(void *array, int n)
{
        void **a = array;
        int i;

        for (i = 0; i < n; i++)
                kfree(a[i]);
        kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
        void **a;
        int i;

        a = kzalloc(n * sizeof(void *), flags);
        if (!a)
                return NULL;
        for (i = 0; i < n; i++) {
                a[i] = kzalloc(size, flags);
                if (!a[i]) {
                        array_free(a, n);
                        return NULL;
                }
        }
        return a;
}

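/*
 * Usage sketch (illustrative only, not part of the original file): the two
 * helpers above pair up to manage an array of individually allocated
 * entries.  The names "table" and "NUM_ENTRIES" are hypothetical.
 *
 *      void **table = array_zalloc(NUM_ENTRIES, sizeof(struct se_lun),
 *                      GFP_KERNEL);
 *      if (!table)
 *              return -ENOMEM;
 *      ...
 *      array_free(table, NUM_ENTRIES);
 */
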
/*      core_create_device_list_for_node():
 *
 *      Allocate and initialize the per-node array of struct se_dev_entry.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;
        int i;

        nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
                        sizeof(struct se_dev_entry), GFP_KERNEL);
        if (!nacl->device_list) {
                pr_err("Unable to allocate memory for"
                        " struct se_node_acl->device_list\n");
                return -ENOMEM;
        }
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                atomic_set(&deve->ua_count, 0);
                atomic_set(&deve->pr_ref_count, 0);
                spin_lock_init(&deve->ua_lock);
                INIT_LIST_HEAD(&deve->alua_port_list);
                INIT_LIST_HEAD(&deve->ua_list);
        }

        return 0;
}

/*      core_tpg_check_initiator_node_acl()
 *
 *      Return an existing ACL for initiatorname, or allocate a dynamic
 *      demo-mode ACL when the fabric allows it.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl)
                return acl;

        if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
                return NULL;

        acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
        if (!acl)
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);
        acl->dynamic_node_acl = 1;

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }
        /*
         * Here we only create demo-mode MappedLUNs from the active
         * TPG LUNs if the fabric is not explicitly asking for
         * tpg_check_demo_mode_login_only() == 1.
         */
        if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
            (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
                core_tpg_add_node_to_devs(acl, tpg);

        spin_lock_irq(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_irq(&tpg->acl_node_lock);

        pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

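/*
 * Usage sketch (an assumption about fabric callers, not code from this
 * file): a fabric module typically resolves the initiator ACL from its
 * login path, e.g.:
 *
 *      se_nacl = core_tpg_check_initiator_node_acl(se_tpg, iname);
 *      if (!se_nacl)
 *              return -EACCES;         (no ACL and demo mode disabled)
 *
 * where "iname" is the hypothetical initiator name from the login request.
 */
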
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
        int i;
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = tpg->tpg_lun_list[i];

                if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
                    (lun->lun_se_dev == NULL))
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);
                core_dev_del_lun(tpg, lun->unpacked_lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*      core_tpg_add_initiator_node_acl():
 *
 *      Register an explicitly configured node ACL, converting an existing
 *      dynamic (demo-mode) ACL in place when one is found.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *se_nacl,
        const char *initiatorname,
        u32 queue_depth)
{
        struct se_node_acl *acl = NULL;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
                        spin_unlock_irq(&tpg->acl_node_lock);
                        /*
                         * Release the locally allocated struct se_node_acl
                         * because core_tpg_add_initiator_node_acl() returned
                         * a pointer to an existing demo mode node ACL.
                         */
                        if (se_nacl)
                                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
                                                        se_nacl);
                        goto done;
                }

                pr_err("ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock_irq(&tpg->acl_node_lock);
                return ERR_PTR(-EEXIST);
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        if (!se_nacl) {
                pr_err("struct se_node_acl pointer is NULL\n");
                return ERR_PTR(-EINVAL);
        }
        /*
         * For v4.x logic the se_node_acl_s is hanging off a fabric
         * dependent structure allocated via
         * struct target_core_fabric_ops->fabric_make_nodeacl()
         */
        acl = se_nacl;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = queue_depth;
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-ENOMEM);
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_irq(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_irq(&tpg->acl_node_lock);

done:
        pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);

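/*
 * Usage sketch (hypothetical fabric configfs handler): an explicit ACL is
 * registered from ->fabric_make_nodeacl(), roughly:
 *
 *      acl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl, name, depth);
 *      if (IS_ERR(acl))
 *              return ERR_CAST(acl);
 *
 * "se_nacl" points at the fabric's embedded struct se_node_acl; on internal
 * failure this function releases it via ->tpg_release_fabric_acl() itself.
 */
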
/*      core_tpg_del_initiator_node_acl():
 *
 *      Unregister a node ACL, forcing shutdown of any active sessions
 *      that still reference it.
 */
int core_tpg_del_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        int force)
{
        LIST_HEAD(sess_list);
        struct se_session *sess, *sess_tmp;
        unsigned long flags;
        int rc;

        spin_lock_irq(&tpg->acl_node_lock);
        if (acl->dynamic_node_acl)
                acl->dynamic_node_acl = 0;
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irqsave(&acl->nacl_sess_lock, flags);
        acl->acl_stop = 1;

        list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
                                sess_acl_list) {
                if (sess->sess_tearing_down != 0)
                        continue;

                target_get_session(sess);
                list_move(&sess->sess_acl_list, &sess_list);
        }
        spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

        list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
                list_del(&sess->sess_acl_list);

                rc = tpg->se_tpg_tfo->shutdown_session(sess);
                /* Drop the reference taken in the walk above */
                target_put_session(sess);
                if (!rc)
                        continue;
                /*
                 * shutdown_session() shut the session down in this context,
                 * so drop the session's remaining reference as well.
                 */
                target_put_session(sess);
        }
        target_put_nacl(acl);
        /*
         * Wait for last target_put_nacl() to complete in target_complete_nacl()
         * for active fabric session transport_deregister_session() callbacks.
         */
        wait_for_completion(&acl->acl_free_comp);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);

        pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

        return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/*      core_tpg_set_initiator_node_queue_depth():
 *
 *      Change the queue depth for a node ACL, optionally forcing session
 *      reinstatement when an active session exists.
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_portal_group *tpg,
        unsigned char *initiatorname,
        u32 queue_depth,
        int force)
{
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
        unsigned long flags;
        int dynamic_acl = 0;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!acl) {
                pr_err("Access Control List entry for %s Initiator"
                        " Node %s does not exist for TPG %hu, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock_irq(&tpg->acl_node_lock);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irqsave(&tpg->session_lock, flags);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;

                if (!force) {
                        pr_err("Unable to change queue depth for %s"
                                " Initiator Node: %s while session is"
                                " operational.  To forcefully change the queue"
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
                        spin_unlock_irqrestore(&tpg->session_lock, flags);

                        spin_lock_irq(&tpg->acl_node_lock);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
                        spin_unlock_irq(&tpg->acl_node_lock);
                        return -EEXIST;
                }
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!tpg->se_tpg_tfo->shutdown_session(sess))
                        continue;

                init_sess = sess;
                break;
        }

        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * core_set_queue_depth_for_node() to add the requested queue depth.
         *
         * Finally call tpg->se_tpg_tfo->close_session() to force session
         * reinstatement to occur if there is an active session for the
         * $FABRIC_MOD Initiator Node in question.
         */
        acl->queue_depth = queue_depth;

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                spin_unlock_irqrestore(&tpg->session_lock, flags);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
                 * the $FABRIC_MOD has already set the session reinstatement
                 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
                 */
                if (init_sess)
                        tpg->se_tpg_tfo->close_session(init_sess);

                spin_lock_irq(&tpg->acl_node_lock);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
                spin_unlock_irq(&tpg->acl_node_lock);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&tpg->session_lock, flags);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
         */
        if (init_sess)
                tpg->se_tpg_tfo->close_session(init_sess);

        pr_debug("Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", queue_depth,
                initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg));

        spin_lock_irq(&tpg->acl_node_lock);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
        spin_unlock_irq(&tpg->acl_node_lock);

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

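/*
 * Usage sketch (assumed configfs attribute store path, names hypothetical):
 *
 *      ret = core_tpg_set_initiator_node_queue_depth(se_tpg, iname,
 *                      new_depth, 1);
 *      if (ret < 0)
 *              return ret;
 *
 * Passing force=1 allows the depth change while a session is live, at the
 * cost of forcing session reinstatement.
 */
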
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
        /* Set in core_dev_setup_virtual_lun0() */
        struct se_device *dev = g_lun0_dev;
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;
        u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        int ret;

        lun->unpacked_lun = 0;
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_acl_list);
        INIT_LIST_HEAD(&lun->lun_cmd_list);
        spin_lock_init(&lun->lun_acl_lock);
        spin_lock_init(&lun->lun_cmd_lock);
        spin_lock_init(&lun->lun_sep_lock);

        ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
        if (ret < 0)
                return ret;

        return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;

        core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
        struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        void *tpg_fabric_ptr,
        int se_tpg_type)
{
        struct se_lun *lun;
        u32 i;

        se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
                        sizeof(struct se_lun), GFP_KERNEL);
        if (!se_tpg->tpg_lun_list) {
                pr_err("Unable to allocate struct se_portal_group->"
                                "tpg_lun_list\n");
                return -ENOMEM;
        }

        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = se_tpg->tpg_lun_list[i];
                lun->unpacked_lun = i;
                lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
                atomic_set(&lun->lun_acl_count, 0);
                init_completion(&lun->lun_shutdown_comp);
                INIT_LIST_HEAD(&lun->lun_acl_list);
                INIT_LIST_HEAD(&lun->lun_cmd_list);
                spin_lock_init(&lun->lun_acl_lock);
                spin_lock_init(&lun->lun_cmd_lock);
                spin_lock_init(&lun->lun_sep_lock);
        }

        se_tpg->se_tpg_type = se_tpg_type;
        se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
        se_tpg->se_tpg_tfo = tfo;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_node);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->acl_node_lock);
        spin_lock_init(&se_tpg->session_lock);
        spin_lock_init(&se_tpg->tpg_lun_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
                if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
                        /*
                         * Free the LUN table allocated above; se_tpg itself
                         * is owned and released by the fabric caller.
                         */
                        array_free(se_tpg->tpg_lun_list,
                                        TRANSPORT_MAX_LUNS_PER_TPG);
                        return -ENOMEM;
                }
        }

        spin_lock_bh(&tpg_lock);
        list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
        spin_unlock_bh(&tpg_lock);

        pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
                " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
                "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);

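/*
 * Usage sketch (hypothetical fabric module): a ->fabric_make_tpg() handler
 * would register its new TPG along these lines:
 *
 *      ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *                      my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *      if (ret < 0)
 *              return ERR_PTR(ret);
 *
 * with the matching core_tpg_deregister() call made from ->fabric_drop_tpg().
 */
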
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        struct se_node_acl *nacl, *nacl_tmp;

        pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
                " for endpoint: %s Portal Tag %u\n",
                se_tpg->se_tpg_tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery",
                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
                se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

        spin_lock_bh(&tpg_lock);
        list_del(&se_tpg->se_tpg_node);
        spin_unlock_bh(&tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();
        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        spin_lock_irq(&se_tpg->acl_node_lock);
        list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
                        acl_list) {
                list_del(&nacl->acl_list);
                se_tpg->num_node_acls--;
                spin_unlock_irq(&se_tpg->acl_node_lock);

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

                spin_lock_irq(&se_tpg->acl_node_lock);
        }
        spin_unlock_irq(&se_tpg->acl_node_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
                core_tpg_release_virtual_lun0(se_tpg);

        se_tpg->se_tpg_fabric_ptr = NULL;
        array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_pre_addlun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("TPG Logical Unit Number: %u is already active"
                        " on %s Target Portal Group: %u, ignoring request.\n",
                        unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-EINVAL);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_post_addlun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
        void *lun_ptr)
{
        int ret;

        ret = core_dev_export(lun_ptr, tpg, lun);
        if (ret < 0)
                return ret;

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_access = lun_access;
        lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}

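/*
 * Usage sketch (an assumption about the external caller; cf.
 * core_tpg_setup_virtual_lun0() above, which uses only the post step):
 * adding a LUN is a two-step pre/post sequence, roughly:
 *
 *      lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *      if (IS_ERR(lun))
 *              return PTR_ERR(lun);
 *      ret = core_tpg_post_addlun(tpg, lun, lun_access, dev);
 */
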
static void core_tpg_shutdown_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_clear_lun_from_tpg(lun, tpg);
        transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %u, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-ENODEV);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_post_dellun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_tpg_shutdown_lun(tpg, lun);

        core_dev_unexport(lun->lun_se_dev, tpg, lun);

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}
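
/*
 * Usage sketch (assumed caller, e.g. core_dev_del_lun() as referenced from
 * core_tpg_clear_object_luns() above): deletion pairs the pre/post steps:
 *
 *      lun = core_tpg_pre_dellun(tpg, unpacked_lun);
 *      if (IS_ERR(lun))
 *              return PTR_ERR(lun);
 *      core_tpg_post_dellun(tpg, lun);
 */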