Print this page
1023 nv_sata support for NVIDIA MCP61
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c
+++ new/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
↓ open down ↓ |
13 lines elided |
↑ open up ↑ |
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 + * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 25 */
25 26
26 27 /*
27 28 *
28 - * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
29 - * based chipsets.
29 + * nv_sata is a combo SATA HBA driver for CK804/MCP04 (ck804) and
30 + * MCP55/MCP51/MCP61 (mcp5x) based chipsets.
30 31 *
31 32 * NCQ
32 33 * ---
33 34 *
34 35 * A portion of the NCQ is in place, but is incomplete. NCQ is disabled
35 36 * and is likely to be revisited in the future.
36 37 *
37 38 *
38 39 * Power Management
39 40 * ----------------
40 41 *
41 42 * Normally power management would be responsible for ensuring the device
42 43 * is quiescent and then changing power states to the device, such as
43 44 * powering down parts or all of the device. mcp5x/ck804 is unique in
44 45 * that it is only available as part of a larger southbridge chipset, so
45 46 * removing power to the device isn't possible. Switches to control
46 47 * power management states D0/D3 in the PCI configuration space appear to
47 48 * be supported but changes to these states apparently are ignored.
48 49 * The only further PM that the driver _could_ do is shut down the PHY,
49 50 * but in order to deliver the first rev of the driver sooner than later,
50 51 * that will be deferred until some future phase.
51 52 *
52 53 * Since the driver currently will not directly change any power state to
53 54 * the device, no power() entry point will be required. However, it is
54 55 * possible that in ACPI power state S3, aka suspend to RAM, that power
55 56 * can be removed to the device, and the driver cannot rely on BIOS to
56 57 * have reset any state. For the time being, there are no known
57 58 * non-default configurations that need to be programmed. This judgement
58 59 * is based on the port of the legacy ata driver not having any such
59 60 * functionality and based on conversations with the PM team. If such a
60 61 * restoration is later deemed necessary it can be incorporated into the
61 62 * DDI_RESUME processing.
62 63 *
63 64 */
64 65
65 66 #include <sys/scsi/scsi.h>
66 67 #include <sys/pci.h>
67 68 #include <sys/byteorder.h>
68 69 #include <sys/sunddi.h>
69 70 #include <sys/sata/sata_hba.h>
70 71 #ifdef SGPIO_SUPPORT
71 72 #include <sys/sata/adapters/nv_sata/nv_sgpio.h>
72 73 #include <sys/devctl.h>
73 74 #include <sys/sdt.h>
74 75 #endif
75 76 #include <sys/sata/adapters/nv_sata/nv_sata.h>
76 77 #include <sys/disp.h>
77 78 #include <sys/note.h>
78 79 #include <sys/promif.h>
79 80
80 81
81 82 /*
82 83 * Function prototypes for driver entry points
83 84 */
84 85 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
85 86 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
86 87 static int nv_quiesce(dev_info_t *dip);
87 88 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
88 89 void *arg, void **result);
89 90
90 91 /*
91 92 * Function prototypes for entry points from sata service module
92 93 * These functions are distinguished from other local functions
93 94 * by the prefix "nv_sata_"
94 95 */
95 96 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
96 97 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
97 98 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
98 99 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
99 100 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
100 101
101 102 /*
102 103 * Local function prototypes
103 104 */
104 105 static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
105 106 static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
106 107 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
107 108 #ifdef NV_MSI_SUPPORTED
108 109 static int nv_add_msi_intrs(nv_ctl_t *nvc);
109 110 #endif
110 111 static void nv_rem_intrs(nv_ctl_t *nvc);
111 112 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
112 113 static int nv_start_nodata(nv_port_t *nvp, int slot);
113 114 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
114 115 static int nv_start_pio_in(nv_port_t *nvp, int slot);
115 116 static int nv_start_pio_out(nv_port_t *nvp, int slot);
116 117 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
117 118 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
118 119 static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
119 120 static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
120 121 static int nv_start_dma(nv_port_t *nvp, int slot);
121 122 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
122 123 static void nv_uninit_ctl(nv_ctl_t *nvc);
123 124 static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
124 125 static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
125 126 static void nv_uninit_port(nv_port_t *nvp);
126 127 static void nv_init_port(nv_port_t *nvp);
127 128 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
128 129 static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
129 130 #ifdef NCQ
130 131 static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131 132 #endif
132 133 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
133 134 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
134 135 int state);
135 136 static void nv_common_reg_init(nv_ctl_t *nvc);
136 137 static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
137 138 static void nv_reset(nv_port_t *nvp, char *reason);
138 139 static void nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot);
139 140 static void nv_timeout(void *);
140 141 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
141 142 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
142 143 static void nv_read_signature(nv_port_t *nvp);
143 144 static void mcp5x_set_intr(nv_port_t *nvp, int flag);
144 145 static void ck804_set_intr(nv_port_t *nvp, int flag);
145 146 static void nv_resume(nv_port_t *nvp);
146 147 static void nv_suspend(nv_port_t *nvp);
147 148 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
148 149 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
149 150 boolean_t reset);
150 151 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
151 152 sata_pkt_t *spkt);
152 153 static void nv_link_event(nv_port_t *nvp, int flags);
153 154 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
154 155 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
155 156 uchar_t failure_onbits2, uchar_t failure_offbits2,
156 157 uchar_t failure_onbits3, uchar_t failure_offbits3,
157 158 uint_t timeout_usec, int type_wait);
158 159 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
159 160 uint_t timeout_usec, int type_wait);
160 161 static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
161 162 static void nv_setup_timeout(nv_port_t *nvp, clock_t microseconds);
162 163 static clock_t nv_monitor_reset(nv_port_t *nvp);
163 164 static int nv_bm_status_clear(nv_port_t *nvp);
164 165 static void nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...);
165 166
166 167 #ifdef SGPIO_SUPPORT
167 168 static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
168 169 static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
169 170 static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
170 171 cred_t *credp, int *rvalp);
171 172
172 173 static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
173 174 static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
174 175 uint32_t *cbpp);
175 176 static int nv_sgp_init(nv_ctl_t *nvc);
176 177 static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
177 178 static int nv_sgp_csr_read(nv_ctl_t *nvc);
178 179 static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
179 180 static int nv_sgp_write_data(nv_ctl_t *nvc);
180 181 static void nv_sgp_activity_led_ctl(void *arg);
181 182 static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
182 183 static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
183 184 static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
184 185 static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
185 186 static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
186 187 static void nv_sgp_cleanup(nv_ctl_t *nvc);
187 188 #endif
188 189
189 190
190 191 /*
191 192 * DMA attributes for the data buffer for x86. dma_attr_burstsizes is unused.
192 193 * Verify if needed if ported to other ISA.
193 194 */
194 195 static ddi_dma_attr_t buffer_dma_attr = {
195 196 DMA_ATTR_V0, /* dma_attr_version */
196 197 0, /* dma_attr_addr_lo: lowest bus address */
197 198 0xffffffffull, /* dma_attr_addr_hi: */
198 199 NV_BM_64K_BOUNDARY - 1, /* dma_attr_count_max i.e for one cookie */
199 200 4, /* dma_attr_align */
200 201 1, /* dma_attr_burstsizes. */
201 202 1, /* dma_attr_minxfer */
202 203 0xffffffffull, /* dma_attr_maxxfer including all cookies */
203 204 0xffffffffull, /* dma_attr_seg */
204 205 NV_DMA_NSEGS, /* dma_attr_sgllen */
205 206 512, /* dma_attr_granular */
206 207 0, /* dma_attr_flags */
207 208 };
/*
 * Identical to buffer_dma_attr above except dma_attr_addr_hi allows the
 * full 40-bit address range (0xffffffffffull).  Selected at run time via
 * the nv_sata_40bit_dma tunable.
 */
208 209 static ddi_dma_attr_t buffer_dma_40bit_attr = {
209 210 DMA_ATTR_V0, /* dma_attr_version */
210 211 0, /* dma_attr_addr_lo: lowest bus address */
211 212 0xffffffffffull, /* dma_attr_addr_hi: */
212 213 NV_BM_64K_BOUNDARY - 1, /* dma_attr_count_max i.e for one cookie */
213 214 4, /* dma_attr_align */
214 215 1, /* dma_attr_burstsizes. */
215 216 1, /* dma_attr_minxfer */
216 217 0xffffffffull, /* dma_attr_maxxfer including all cookies */
217 218 0xffffffffull, /* dma_attr_seg */
218 219 NV_DMA_NSEGS, /* dma_attr_sgllen */
219 220 512, /* dma_attr_granular */
220 221 0, /* dma_attr_flags */
221 222 };
222 223
223 224
224 225 /*
225 226 * DMA attributes for PRD tables
226 227 */
227 228 ddi_dma_attr_t nv_prd_dma_attr = {
228 229 DMA_ATTR_V0, /* dma_attr_version */
229 230 0, /* dma_attr_addr_lo */
230 231 0xffffffffull, /* dma_attr_addr_hi */
231 232 NV_BM_64K_BOUNDARY - 1, /* dma_attr_count_max */
232 233 4, /* dma_attr_align */
233 234 1, /* dma_attr_burstsizes */
234 235 1, /* dma_attr_minxfer */
235 236 NV_BM_64K_BOUNDARY, /* dma_attr_maxxfer */
236 237 NV_BM_64K_BOUNDARY - 1, /* dma_attr_seg */
237 238 1, /* dma_attr_sgllen */
238 239 1, /* dma_attr_granular */
239 240 0 /* dma_attr_flags */
240 241 };
241 242
242 243 /*
243 244 * Device access attributes: little-endian structure access with strict
244 245 * ordering, used for all register mappings set up in nv_attach().
245 246 */
245 246 static ddi_device_acc_attr_t accattr = {
246 247 DDI_DEVICE_ATTR_V0,
247 248 DDI_STRUCTURE_LE_ACC,
248 249 DDI_STRICTORDER_ACC
249 250 };
250 251
251 252
252 253 #ifdef SGPIO_SUPPORT
/*
 * Character-device entry points exist only so SGPIO LED control ioctls
 * can reach the driver; all block entry points are nodev.
 */
253 254 static struct cb_ops nv_cb_ops = {
254 255 nv_open, /* open */
255 256 nv_close, /* close */
256 257 nodev, /* strategy (block) */
257 258 nodev, /* print (block) */
258 259 nodev, /* dump (block) */
259 260 nodev, /* read */
260 261 nodev, /* write */
261 262 nv_ioctl, /* ioctl */
262 263 nodev, /* devmap */
263 264 nodev, /* mmap */
264 265 nodev, /* segmap */
265 266 nochpoll, /* chpoll */
266 267 ddi_prop_op, /* prop_op */
267 268 NULL, /* streams */
268 269 D_NEW | D_MP |
269 270 D_64BIT | D_HOTPLUG, /* flags */
270 271 CB_REV /* rev */
271 272 };
272 273 #endif /* SGPIO_SUPPORT */
273 274
274 275
275 276 static struct dev_ops nv_dev_ops = {
276 277 DEVO_REV, /* devo_rev */
277 278 0, /* refcnt */
278 279 nv_getinfo, /* info */
279 280 nulldev, /* identify */
280 281 nulldev, /* probe */
281 282 nv_attach, /* attach */
282 283 nv_detach, /* detach */
283 284 nodev, /* no reset */
284 285 #ifdef SGPIO_SUPPORT
285 286 &nv_cb_ops, /* driver operations */
286 287 #else
287 288 (struct cb_ops *)0, /* driver operations */
288 289 #endif
289 290 NULL, /* bus operations */
290 291 NULL, /* power */
291 292 nv_quiesce /* quiesce */
292 293 };
293 294
294 295
295 296 /*
296 297 * Request Sense CDB for ATAPI
297 298 */
298 299 static const uint8_t nv_rqsense_cdb[16] = {
299 300 SCMD_REQUEST_SENSE,
300 301 0,
301 302 0,
302 303 0,
303 304 SATA_ATAPI_MIN_RQSENSE_LEN,
304 305 0,
↓ open down ↓ |
265 lines elided |
↑ open up ↑ |
305 306 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* pad out to max CDB length */
306 307 };
307 308
308 309
309 310 static sata_tran_hotplug_ops_t nv_hotplug_ops;
310 311
311 312 extern struct mod_ops mod_driverops;
312 313
/*
 * Module linkage.  The diff below updates the linkinfo string to list the
 * newly supported MCP04/MCP61 parts alongside the existing chipsets.
 */
313 314 static struct modldrv modldrv = {
314 315 &mod_driverops, /* driverops */
315 - "Nvidia ck804/mcp51/mcp55 HBA",
316 + "NVIDIA CK804/MCP04/MCP51/MCP55/MCP61 HBA",
316 317 &nv_dev_ops, /* driver ops */
317 318 };
318 319
319 320 static struct modlinkage modlinkage = {
320 321 MODREV_1,
321 322 &modldrv,
322 323 NULL
323 324 };
324 325
325 326 /*
326 327 * Maximum number of consecutive interrupts processed in the loop in the
327 328 * single invocation of the port interrupt routine.
328 329 */
329 330 int nv_max_intr_loops = NV_MAX_INTR_PER_DEV;
330 331
331 332 /*
332 333 * wait between checks of reg status
333 334 */
334 335 int nv_usec_delay = NV_WAIT_REG_CHECK;
335 336
336 337 /*
337 338 * The following used for nv_vcmn_err() and nv_log()
338 339 */
339 340
340 341 /*
341 342 * temp buffer to save from wasting limited stack space
342 343 */
343 344 static char nv_log_buf[NV_LOGBUF_LEN];
344 345
345 346 /*
346 347 * protects nv_log_buf
347 348 */
348 349 static kmutex_t nv_log_mutex;
349 350
350 351 /*
351 352 * these on-by-default flags were chosen so that the driver
352 353 * logs as much non-usual run-time information as possible
353 354 * without overflowing the ring with useless information or
354 355 * causing any significant performance penalty.
355 356 */
356 357 int nv_debug_flags =
357 358 NVDBG_HOT|NVDBG_RESET|NVDBG_ALWAYS|NVDBG_TIMEOUT|NVDBG_EVENT;
358 359
359 360 /*
360 361 * normally debug information is not logged to the console
361 362 * but this allows it to be enabled.
362 363 */
363 364 int nv_log_to_console = B_FALSE;
364 365
365 366 /*
366 367 * normally debug information is not logged to cmn_err but
367 368 * in some cases it may be desired.
368 369 */
369 370 int nv_log_to_cmn_err = B_FALSE;
370 371
371 372 /*
372 373 * using prom print avoids using cmn_err/syslog and goes right
373 374 * to the console which may be desirable in some situations, but
374 375 * it may be synchronous, which would change timings and
375 376 * impact performance. Use with caution.
376 377 */
377 378 int nv_prom_print = B_FALSE;
378 379
379 380 /*
380 381 * Opaque state pointer to be initialized by ddi_soft_state_init()
381 382 */
382 383 static void *nv_statep = NULL;
383 384
384 385 /*
385 386 * Map from CBP to shared space
386 387 *
387 388 * When an MCP55/IO55 part supports SGPIO, there is a single CBP (SGPIO
388 389 * Control Block Pointer as well as the corresponding Control Block) that
389 390 * is shared across all driver instances associated with that part. The
390 391 * Control Block is used to update and query the LED state for the devices
391 392 * on the controllers associated with those instances. There is also some
392 393 * driver state (called the 'common' area here) associated with each SGPIO
393 394 * Control Block. The nv_sgp_cbp2cmn array is used to map a given CBP to its
394 395 * control area.
395 396 *
396 397 * The driver can also use this mapping array to determine whether the
397 398 * common area for a given CBP has been initialized, and, if it isn't
398 399 * initialized, initialize it.
399 400 *
400 401 * When a driver instance with a CBP value that is already in the array is
401 402 * initialized, it will use the pointer to the previously initialized common
402 403 * area associated with that SGPIO CBP value, rather than initialize it
403 404 * itself.
404 405 *
405 406 * nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
406 407 */
407 408 #ifdef SGPIO_SUPPORT
408 409 static kmutex_t nv_sgp_c2c_mutex;
409 410 static struct nv_sgp_cbp2cmn nv_sgp_cbp2cmn[NV_MAX_CBPS];
410 411 #endif
411 412
412 413 /*
413 414 * control whether 40bit DMA is used or not
414 415 */
415 416 int nv_sata_40bit_dma = B_TRUE;
416 417
/* Hotplug vector handed to the sata framework (defined here, declared above) */
417 418 static sata_tran_hotplug_ops_t nv_hotplug_ops = {
418 419 SATA_TRAN_HOTPLUG_OPS_REV_1, /* structure version */
419 420 nv_sata_activate, /* activate port. cfgadm -c connect */
420 421 nv_sata_deactivate /* deactivate port. cfgadm -c disconnect */
421 422 };
422 423
423 424
424 425 /*
425 426 * nv module initialization
426 427 */
/*
 * Module load: set up soft state, the log mutex and (if SGPIO_SUPPORT)
 * the CBP-to-common map, then register with the sata framework and
 * install the module.  Each failure path unwinds what was set up.
 */
427 428 int
428 429 _init(void)
429 430 {
430 431 int error;
431 432 #ifdef SGPIO_SUPPORT
432 433 int i;
433 434 #endif
434 435
435 436 error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
436 437
437 438 if (error != 0) {
438 439
439 440 return (error);
440 441 }
441 442
442 443 mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
443 444 #ifdef SGPIO_SUPPORT
444 445 mutex_init(&nv_sgp_c2c_mutex, NULL, MUTEX_DRIVER, NULL);
445 446
446 447 for (i = 0; i < NV_MAX_CBPS; i++) {
447 448 nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
448 449 nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
449 450 }
450 451 #endif
451 452
/*
 * NOTE(review): the failure paths below destroy nv_log_mutex but not
 * nv_sgp_c2c_mutex when SGPIO_SUPPORT is defined -- looks like a minor
 * cleanup omission; confirm against _fini(), which destroys both.
 */
452 453 if ((error = sata_hba_init(&modlinkage)) != 0) {
453 454 ddi_soft_state_fini(&nv_statep);
454 455 mutex_destroy(&nv_log_mutex);
455 456
456 457 return (error);
457 458 }
458 459
459 460 error = mod_install(&modlinkage);
460 461 if (error != 0) {
461 462 sata_hba_fini(&modlinkage);
462 463 ddi_soft_state_fini(&nv_statep);
463 464 mutex_destroy(&nv_log_mutex);
464 465
465 466 return (error);
466 467 }
467 468
468 469 return (error);
469 470 }
470 471
471 472
472 473 /*
473 474 * nv module uninitialize
474 475 */
/*
 * Module unload: mod_remove() first (fails if the driver is busy), then
 * release everything _init() allocated, in reverse order.
 */
475 476 int
476 477 _fini(void)
477 478 {
478 479 int error;
479 480
480 481 error = mod_remove(&modlinkage);
481 482
482 483 if (error != 0) {
483 484 return (error);
484 485 }
485 486
486 487 /*
487 488 * remove the resources allocated in _init()
488 489 */
489 490 mutex_destroy(&nv_log_mutex);
490 491 #ifdef SGPIO_SUPPORT
491 492 mutex_destroy(&nv_sgp_c2c_mutex);
492 493 #endif
493 494 sata_hba_fini(&modlinkage);
494 495 ddi_soft_state_fini(&nv_statep);
495 496
496 497 return (error);
497 498 }
498 499
499 500
500 501 /*
501 502 * nv _info entry point
502 503 */
/* Standard _info(9E): report module information via mod_info(). */
503 504 int
504 505 _info(struct modinfo *modinfop)
505 506 {
506 507 return (mod_info(&modlinkage, modinfop));
507 508 }
508 509
509 510
510 511 /*
511 512 * these wrappers for ddi_{get,put}{8,16,32} are for observability
512 513 * with dtrace; in non-DEBUG builds they compile away to the ddi
513 514 * routines directly via the #defines below.
513 514 */
514 515 #ifdef DEBUG
515 516
516 517 static void
517 518 nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
518 519 {
519 520 ddi_put8(handle, dev_addr, value);
520 521 }
521 522
522 523 static void
523 524 nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
524 525 {
525 526 ddi_put32(handle, dev_addr, value);
526 527 }
527 528
528 529 static uint32_t
529 530 nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
530 531 {
531 532 return (ddi_get32(handle, dev_addr));
532 533 }
533 534
534 535 static void
535 536 nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
536 537 {
537 538 ddi_put16(handle, dev_addr, value);
538 539 }
539 540
540 541 static uint16_t
541 542 nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
542 543 {
543 544 return (ddi_get16(handle, dev_addr));
544 545 }
545 546
546 547 static uint8_t
547 548 nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
548 549 {
549 550 return (ddi_get8(handle, dev_addr));
550 551 }
551 552
552 553 #else
553 554
554 555 #define nv_put8 ddi_put8
555 556 #define nv_put32 ddi_put32
556 557 #define nv_get32 ddi_get32
557 558 #define nv_put16 ddi_put16
558 559 #define nv_get16 ddi_get16
559 560 #define nv_get8 ddi_get8
560 561
561 562 #endif
562 563
563 564
564 565 /*
565 566 * Driver attach
566 567 */
/*
 * attach(9E) entry point.  DDI_ATTACH: allocate soft state, read the PCI
 * devid/revid (devid read added by this changeset to distinguish MCP61),
 * enable IO/MEM/bus-master, map the 6 BARs, init controller state and
 * interrupts (MSI, falling back to fixed), optionally init SGPIO, reset
 * each present port to gather its signature, and register with the sata
 * framework.  The attach_state bitmask records progress so the failure
 * path after the switch can unwind exactly what was done.  DDI_RESUME:
 * re-enable PCI command bits and BAR5 access, then resume each port.
 */
567 568 static int
568 569 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
569 570 {
570 571 int status, attach_state, intr_types, bar, i, j, command;
571 572 int inst = ddi_get_instance(dip);
572 573 ddi_acc_handle_t pci_conf_handle;
573 574 nv_ctl_t *nvc;
574 575 uint8_t subclass;
575 576 uint32_t reg32;
576 577 #ifdef SGPIO_SUPPORT
577 578 pci_regspec_t *regs;
578 579 int rlen;
579 580 #endif
580 581
581 582 switch (cmd) {
582 583
583 584 case DDI_ATTACH:
584 585
585 586 attach_state = ATTACH_PROGRESS_NONE;
586 587
587 588 status = ddi_soft_state_zalloc(nv_statep, inst);
588 589
589 590 if (status != DDI_SUCCESS) {
590 591 break;
591 592 }
↓ open down ↓ |
266 lines elided |
↑ open up ↑ |
592 593
593 594 nvc = ddi_get_soft_state(nv_statep, inst);
594 595
595 596 nvc->nvc_dip = dip;
596 597
597 598 NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach(): DDI_ATTACH", NULL);
598 599
599 600 attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
600 601
601 602 if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
603 + nvc->nvc_devid = pci_config_get16(pci_conf_handle,
604 + PCI_CONF_DEVID);
602 605 nvc->nvc_revid = pci_config_get8(pci_conf_handle,
603 606 PCI_CONF_REVID);
604 607 NVLOG(NVDBG_INIT, nvc, NULL,
605 - "inst %d: silicon revid is %x nv_debug_flags=%x",
606 - inst, nvc->nvc_revid, nv_debug_flags);
608 + "inst %d: devid is %x silicon revid is %x"
609 + " nv_debug_flags=%x", inst, nvc->nvc_devid,
610 + nvc->nvc_revid, nv_debug_flags);
607 611 } else {
608 612 break;
609 613 }
610 614
611 615 attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
612 616
613 617 /*
614 618 * Set the PCI command register: enable IO/MEM/Master.
615 619 */
616 620 command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
617 621 pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
618 622 command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
619 623
620 624 subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
621 625
622 626 if (subclass & PCI_MASS_RAID) {
623 627 cmn_err(CE_WARN,
624 628 "attach failed: RAID mode not supported");
625 629
626 630 break;
627 631 }
628 632
629 633 /*
630 634 * the 6 bars of the controller are:
631 635 * 0: port 0 task file
632 636 * 1: port 0 status
633 637 * 2: port 1 task file
634 638 * 3: port 1 status
635 639 * 4: bus master for both ports
636 640 * 5: extended registers for SATA features
637 641 */
638 642 for (bar = 0; bar < 6; bar++) {
639 643 status = ddi_regs_map_setup(dip, bar + 1,
640 644 (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
641 645 &nvc->nvc_bar_hdl[bar]);
642 646
643 647 if (status != DDI_SUCCESS) {
644 648 NVLOG(NVDBG_INIT, nvc, NULL,
645 649 "ddi_regs_map_setup failure for bar"
646 650 " %d status = %d", bar, status);
647 651 break;
648 652 }
649 653 }
650 654
651 655 attach_state |= ATTACH_PROGRESS_BARS;
652 656
653 657 /*
654 658 * initialize controller structures
655 659 */
656 660 status = nv_init_ctl(nvc, pci_conf_handle);
657 661
658 662 if (status == NV_FAILURE) {
659 663 NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl failed",
660 664 NULL);
661 665
662 666 break;
663 667 }
664 668
665 669 attach_state |= ATTACH_PROGRESS_CTL_SETUP;
666 670
667 671 /*
668 672 * initialize mutexes
669 673 */
670 674 mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
671 675 DDI_INTR_PRI(nvc->nvc_intr_pri));
672 676
673 677 attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
674 678
675 679 /*
676 680 * get supported interrupt types
677 681 */
678 682 if (ddi_intr_get_supported_types(dip, &intr_types) !=
679 683 DDI_SUCCESS) {
680 684 nv_cmn_err(CE_WARN, nvc, NULL,
681 685 "ddi_intr_get_supported_types failed");
682 686
683 687 break;
684 688 }
685 689
686 690 NVLOG(NVDBG_INIT, nvc, NULL,
687 691 "ddi_intr_get_supported_types() returned: 0x%x",
688 692 intr_types);
689 693
690 694 #ifdef NV_MSI_SUPPORTED
691 695 if (intr_types & DDI_INTR_TYPE_MSI) {
692 696 NVLOG(NVDBG_INIT, nvc, NULL,
693 697 "using MSI interrupt type", NULL);
694 698
695 699 /*
696 700 * Try MSI first, but fall back to legacy if MSI
697 701 * attach fails
698 702 */
699 703 if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
700 704 nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
701 705 attach_state |= ATTACH_PROGRESS_INTR_ADDED;
702 706 NVLOG(NVDBG_INIT, nvc, NULL,
703 707 "MSI interrupt setup done", NULL);
704 708 } else {
705 709 nv_cmn_err(CE_CONT, nvc, NULL,
706 710 "MSI registration failed "
707 711 "will try Legacy interrupts");
708 712 }
709 713 }
710 714 #endif
711 715
712 716 /*
713 717 * Either the MSI interrupt setup has failed or only
714 718 * the fixed interrupts are available on the system.
715 719 */
716 720 if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
717 721 (intr_types & DDI_INTR_TYPE_FIXED)) {
718 722
719 723 NVLOG(NVDBG_INIT, nvc, NULL,
720 724 "using Legacy interrupt type", NULL);
721 725
722 726 if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
723 727 nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
724 728 attach_state |= ATTACH_PROGRESS_INTR_ADDED;
725 729 NVLOG(NVDBG_INIT, nvc, NULL,
726 730 "Legacy interrupt setup done", NULL);
727 731 } else {
728 732 nv_cmn_err(CE_WARN, nvc, NULL,
729 733 "legacy interrupt setup failed");
730 734 NVLOG(NVDBG_INIT, nvc, NULL,
731 735 "legacy interrupt setup failed", NULL);
732 736 break;
733 737 }
734 738 }
735 739
736 740 if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
737 741 NVLOG(NVDBG_INIT, nvc, NULL,
738 742 "no interrupts registered", NULL);
739 743 break;
740 744 }
741 745
742 746 #ifdef SGPIO_SUPPORT
743 747 /*
744 748 * save off the controller number
745 749 */
/*
 * NOTE(review): "®s" below is a webrev rendering artifact of the
 * HTML entity for "&regs" -- the actual source passes &regs here.
 */
746 750 (void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
747 751 "reg", (caddr_t)®s, &rlen);
748 752 nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
749 753 kmem_free(regs, rlen);
750 754
751 755 /*
752 756 * initialize SGPIO
753 757 */
754 758 nv_sgp_led_init(nvc, pci_conf_handle);
755 759 #endif /* SGPIO_SUPPORT */
756 760
757 761 /*
758 762 * Do initial reset so that signature can be gathered
759 763 */
760 764 for (j = 0; j < NV_NUM_PORTS; j++) {
761 765 ddi_acc_handle_t bar5_hdl;
762 766 uint32_t sstatus;
763 767 nv_port_t *nvp;
764 768
765 769 nvp = &(nvc->nvc_port[j]);
766 770 bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
767 771 sstatus = ddi_get32(bar5_hdl, nvp->nvp_sstatus);
768 772
769 773 if (SSTATUS_GET_DET(sstatus) ==
770 774 SSTATUS_DET_DEVPRE_PHYCOM) {
771 775
772 776 nvp->nvp_state |= NV_ATTACH;
773 777 nvp->nvp_type = SATA_DTYPE_UNKNOWN;
774 778 mutex_enter(&nvp->nvp_mutex);
775 779 nv_reset(nvp, "attach");
776 780
/* block until the reset completes; interrupt path signals the cv */
777 781 while (nvp->nvp_state & NV_RESET) {
778 782 cv_wait(&nvp->nvp_reset_cv,
779 783 &nvp->nvp_mutex);
780 784 }
781 785
782 786 mutex_exit(&nvp->nvp_mutex);
783 787 }
784 788 }
785 789
786 790 /*
787 791 * attach to sata module
788 792 */
789 793 if (sata_hba_attach(nvc->nvc_dip,
790 794 &nvc->nvc_sata_hba_tran,
791 795 DDI_ATTACH) != DDI_SUCCESS) {
/*
 * Flag is set only on failure here; the unwind code below uses it
 * to cancel any per-port timers started before this point.
 */
792 796 attach_state |= ATTACH_PROGRESS_SATA_MODULE;
793 797
794 798 break;
795 799 }
796 800
797 801 pci_config_teardown(&pci_conf_handle);
798 802
799 803 NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS", NULL);
800 804
801 805 return (DDI_SUCCESS);
802 806
803 807 case DDI_RESUME:
804 808
805 809 nvc = ddi_get_soft_state(nv_statep, inst);
806 810
807 811 NVLOG(NVDBG_INIT, nvc, NULL,
808 812 "nv_attach(): DDI_RESUME inst %d", inst);
809 813
810 814 if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
811 815 return (DDI_FAILURE);
812 816 }
813 817
814 818 /*
815 819 * Set the PCI command register: enable IO/MEM/Master.
816 820 */
817 821 command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
818 822 pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
819 823 command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
820 824
821 825 /*
822 826 * Need to set bit 2 to 1 at config offset 0x50
823 827 * to enable access to the bar5 registers.
824 828 */
825 829 reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
826 830
827 831 if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
828 832 pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
829 833 reg32 | NV_BAR5_SPACE_EN);
830 834 }
831 835
832 836 nvc->nvc_state &= ~NV_CTRL_SUSPEND;
833 837
834 838 for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
835 839 nv_resume(&(nvc->nvc_port[i]));
836 840 }
837 841
838 842 pci_config_teardown(&pci_conf_handle);
839 843
840 844 return (DDI_SUCCESS);
841 845
842 846 default:
843 847 return (DDI_FAILURE);
844 848 }
845 849
846 850
847 851 /*
848 852 * DDI_ATTACH failure path starts here
849 853 */
850 854
851 855 if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
852 856 nv_rem_intrs(nvc);
853 857 }
854 858
855 859 if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
856 860 /*
857 861 * Remove timers
858 862 */
859 863 int port = 0;
860 864 nv_port_t *nvp;
861 865
862 866 for (; port < NV_MAX_PORTS(nvc); port++) {
863 867 nvp = &(nvc->nvc_port[port]);
864 868 if (nvp->nvp_timeout_id != 0) {
865 869 (void) untimeout(nvp->nvp_timeout_id);
866 870 }
867 871 }
868 872 }
869 873
870 874 if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
871 875 mutex_destroy(&nvc->nvc_mutex);
872 876 }
873 877
874 878 if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
875 879 nv_uninit_ctl(nvc);
876 880 }
877 881
878 882 if (attach_state & ATTACH_PROGRESS_BARS) {
879 883 while (--bar >= 0) {
880 884 ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
881 885 }
882 886 }
883 887
884 888 if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
885 889 ddi_soft_state_free(nv_statep, inst);
886 890 }
887 891
888 892 if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
889 893 pci_config_teardown(&pci_conf_handle);
890 894 }
891 895
892 896 cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
893 897
894 898 return (DDI_FAILURE);
895 899 }
896 900
897 901
/*
 * detach(9E) entry point.  DDI_DETACH: tear down interrupts, timers,
 * register maps, mutexes, controller state, SGPIO, the sata framework
 * registration and finally the soft state.  DDI_SUSPEND: quiesce each
 * port via nv_suspend() and mark the controller NV_CTRL_SUSPEND (the
 * DDI_RESUME path in nv_attach() reverses this).
 */
898 902 static int
899 903 nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
900 904 {
901 905 int i, port, inst = ddi_get_instance(dip);
902 906 nv_ctl_t *nvc;
903 907 nv_port_t *nvp;
904 908
905 909 nvc = ddi_get_soft_state(nv_statep, inst);
906 910
907 911 switch (cmd) {
908 912
909 913 case DDI_DETACH:
910 914
911 915 NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH", NULL);
912 916
913 917 /*
914 918 * Remove interrupts
915 919 */
916 920 nv_rem_intrs(nvc);
917 921
918 922 /*
919 923 * Remove timers
920 924 */
921 925 for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
922 926 nvp = &(nvc->nvc_port[port]);
923 927 if (nvp->nvp_timeout_id != 0) {
924 928 (void) untimeout(nvp->nvp_timeout_id);
925 929 }
926 930 }
927 931
928 932 /*
929 933 * Remove maps
930 934 */
931 935 for (i = 0; i < 6; i++) {
932 936 ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
933 937 }
934 938
935 939 /*
936 940 * Destroy mutexes
937 941 */
938 942 mutex_destroy(&nvc->nvc_mutex);
939 943
940 944 /*
941 945 * Uninitialize the controller structures
942 946 */
943 947 nv_uninit_ctl(nvc);
944 948
945 949 #ifdef SGPIO_SUPPORT
946 950 /*
947 951 * release SGPIO resources
948 952 */
949 953 nv_sgp_cleanup(nvc);
950 954 #endif
951 955
952 956 /*
953 957 * unregister from the sata module
954 958 */
955 959 (void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
956 960
957 961 /*
958 962 * Free soft state
959 963 */
960 964 ddi_soft_state_free(nv_statep, inst);
961 965
962 966 return (DDI_SUCCESS);
963 967
964 968 case DDI_SUSPEND:
965 969
966 970 NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND", NULL);
967 971
968 972 for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
969 973 nv_suspend(&(nvc->nvc_port[i]));
970 974 }
971 975
972 976 nvc->nvc_state |= NV_CTRL_SUSPEND;
973 977
974 978 return (DDI_SUCCESS);
975 979
976 980 default:
977 981 return (DDI_FAILURE);
978 982 }
979 983 }
980 984
981 985
982 986 /*ARGSUSED*/
983 987 static int
984 988 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
985 989 {
986 990 nv_ctl_t *nvc;
987 991 int instance;
988 992 dev_t dev;
989 993
990 994 dev = (dev_t)arg;
991 995 instance = getminor(dev);
992 996
993 997 switch (infocmd) {
994 998 case DDI_INFO_DEVT2DEVINFO:
995 999 nvc = ddi_get_soft_state(nv_statep, instance);
996 1000 if (nvc != NULL) {
997 1001 *result = nvc->nvc_dip;
998 1002 return (DDI_SUCCESS);
999 1003 } else {
1000 1004 *result = NULL;
1001 1005 return (DDI_FAILURE);
1002 1006 }
1003 1007 case DDI_INFO_DEVT2INSTANCE:
1004 1008 *(int *)result = instance;
1005 1009 break;
1006 1010 default:
1007 1011 break;
1008 1012 }
1009 1013 return (DDI_SUCCESS);
1010 1014 }
1011 1015
1012 1016
1013 1017 #ifdef SGPIO_SUPPORT
1014 1018 /* ARGSUSED */
1015 1019 static int
1016 1020 nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1017 1021 {
1018 1022 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
1019 1023
1020 1024 if (nvc == NULL) {
1021 1025 return (ENXIO);
1022 1026 }
1023 1027
1024 1028 return (0);
1025 1029 }
1026 1030
1027 1031
1028 1032 /* ARGSUSED */
/*
 * close(9E) entry point for the SGPIO LED-control minor node.  No
 * per-open state is kept, so this is a no-op.
 */
static int
nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	return (0);
}
1034 1038
1035 1039
1036 1040 /* ARGSUSED */
/*
 * ioctl(9E) entry point implementing the DEVCTL LED interface on top of
 * the controller's SGPIO registers.
 *
 * Supported commands:
 *   DEVCTL_SET_LED  - enable/disable the FAIL or OK2RM LED for a drive
 *   DEVCTL_GET_LED  - report the current state of a drive LED
 *   DEVCTL_NUM_LEDS - report the number of controllable LEDs
 *
 * Returns 0 on success; EBADF for a bad minor, EIO when SGPIO was not
 * initialized on this controller, EFAULT on copyin/copyout failure,
 * ENXIO/EINVAL for unsupported controllers, LED types, or commands.
 */
static int
nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	nv_ctl_t *nvc;
	int inst;
	int status;
	int ctlr, port;
	int drive;
	uint8_t curr_led;
	struct dc_led_ctl led;

	inst = getminor(dev);
	if (inst == -1) {
		return (EBADF);
	}

	nvc = ddi_get_soft_state(nv_statep, inst);
	if (nvc == NULL) {
		return (EBADF);
	}

	/*
	 * fail if SGPIO was never initialized for this controller
	 */
	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
		return (EIO);
	}

	switch (cmd) {
	case DEVCTL_SET_LED:
		status = ddi_copyin((void *)arg, &led,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		/*
		 * Since only the first two controllers currently support
		 * SGPIO (as per NVIDIA docs), this code will as well.
		 * Note that this validates the port value within
		 * led_number as well.
		 */

		ctlr = SGP_DRV_TO_CTLR(led.led_number);
		if ((ctlr != 0) && (ctlr != 1))
			return (ENXIO);

		/*
		 * blinking is not supported (see DEVCTL_NUM_LEDS below)
		 */
		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
			return (EINVAL);
		}

		drive = led.led_number;

		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
		    (led.led_state == DCL_STATE_OFF)) {

			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
			} else {
				return (ENXIO);
			}

			/* remember that this LED was set via ioctl */
			port = SGP_DRV_TO_PORT(led.led_number);
			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
		}

		if (led.led_ctl_active == DCL_CNTRL_ON) {
			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
			} else {
				return (ENXIO);
			}

			/* remember that this LED was set via ioctl */
			port = SGP_DRV_TO_PORT(led.led_number);
			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
		}

		break;

	case DEVCTL_GET_LED:
		status = ddi_copyin((void *)arg, &led,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		/*
		 * Since only the first two controllers currently support
		 * SGPIO (as per NVIDIA docs), this code will as well.
		 * Note that this validates the port value within
		 * led_number as well.
		 */

		ctlr = SGP_DRV_TO_CTLR(led.led_number);
		if ((ctlr != 0) && (ctlr != 1))
			return (ENXIO);

		/* read the drive's current tri-state LED value */
		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
		    led.led_number);

		port = SGP_DRV_TO_PORT(led.led_number);
		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
			led.led_ctl_active = DCL_CNTRL_ON;

			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
					led.led_state = DCL_STATE_OFF;
				else
					led.led_state = DCL_STATE_ON;
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
					led.led_state = DCL_STATE_OFF;
				else
					led.led_state = DCL_STATE_ON;
			} else {
				return (ENXIO);
			}
		} else {
			led.led_ctl_active = DCL_CNTRL_OFF;
			/*
			 * Not really off, but never set and no constant for
			 * tri-state
			 */
			led.led_state = DCL_STATE_OFF;
		}

		status = ddi_copyout(&led, (void *)arg,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		break;

	case DEVCTL_NUM_LEDS:
		led.led_number = SGPIO_DRV_CNT_VALUE;
		led.led_ctl_active = 1;
		led.led_type = 3;

		/*
		 * According to documentation, NVIDIA SGPIO is supposed to
		 * support blinking, but it does not seem to work in practice.
		 */
		led.led_state = DCL_STATE_ON;

		status = ddi_copyout(&led, (void *)arg,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		break;

	default:
		return (EINVAL);
	}

	return (0);
}
1194 1198 #endif /* SGPIO_SUPPORT */
1195 1199
1196 1200
1197 1201 /*
1198 1202 * Called by sata module to probe a port. Port and device state
1199 1203 * are not changed here... only reported back to the sata module.
1200 1204 *
1201 1205 */
/*
 * SATA HBA tran_probe_port entry point.  Reports the current port and
 * device state back to the sata module without changing it.  Returns
 * SATA_FAILURE only for an out-of-range cport; every recognized port
 * state (deactivated, failed, resetting, no-device, present) is reported
 * via sd and returns SATA_SUCCESS.
 */
static int
nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
{
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	uint8_t cport = sd->satadev_addr.cport;
	uint8_t pmport = sd->satadev_addr.pmport;
	uint8_t qual = sd->satadev_addr.qual;
	uint8_t det;

	nv_port_t *nvp;

	/* reject an out-of-range controller port address */
	if (cport >= NV_MAX_PORTS(nvc)) {
		sd->satadev_type = SATA_DTYPE_NONE;
		sd->satadev_state = SATA_STATE_UNKNOWN;

		return (SATA_FAILURE);
	}

	ASSERT(nvc->nvc_port != NULL);
	nvp = &(nvc->nvc_port[cport]);
	ASSERT(nvp != NULL);

	NVLOG(NVDBG_ENTRY, nvc, nvp,
	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
	    "qual: 0x%x", cport, pmport, qual);

	mutex_enter(&nvp->nvp_mutex);

	/*
	 * This check seems to be done in the SATA module.
	 * It may not be required here
	 */
	if (nvp->nvp_state & NV_DEACTIVATED) {
		nv_cmn_err(CE_WARN, nvc, nvp,
		    "port inactive. Use cfgadm to activate");
		sd->satadev_type = SATA_DTYPE_UNKNOWN;
		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/* a failed port reports its last known device type */
	if (nvp->nvp_state & NV_FAILED) {
		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
		    "probe: port failed", NULL);
		sd->satadev_type = nvp->nvp_type;
		sd->satadev_state = SATA_PSTATE_FAILED;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/* this hardware has no port multiplier support */
	if (qual == SATA_ADDR_PMPORT) {
		sd->satadev_type = SATA_DTYPE_NONE;
		sd->satadev_state = SATA_STATE_UNKNOWN;
		mutex_exit(&nvp->nvp_mutex);
		nv_cmn_err(CE_WARN, nvc, nvp,
		    "controller does not support port multiplier");

		return (SATA_SUCCESS);
	}

	sd->satadev_state = SATA_PSTATE_PWRON;

	nv_copy_registers(nvp, sd, NULL);

	if (nvp->nvp_state & (NV_RESET|NV_LINK_EVENT)) {
		/*
		 * during a reset or link event, fake the status
		 * as it may be changing as a result of the reset
		 * or link event.
		 */
		DTRACE_PROBE(state_reset_link_event_faking_status_p);
		DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);

		SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
		    SSTATUS_IPM_ACTIVE);
		SSTATUS_SET_DET(sd->satadev_scr.sstatus,
		    SSTATUS_DET_DEVPRE_PHYCOM);
		sd->satadev_type = nvp->nvp_type;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	det = SSTATUS_GET_DET(sd->satadev_scr.sstatus);

	/*
	 * determine link status
	 */
	if (det != SSTATUS_DET_DEVPRE_PHYCOM) {
		switch (det) {

		case SSTATUS_DET_NODEV:
		case SSTATUS_DET_PHYOFFLINE:
			sd->satadev_type = SATA_DTYPE_NONE;
			break;

		default:
			sd->satadev_type = SATA_DTYPE_UNKNOWN;
			break;
		}

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/*
	 * Just report the current port state
	 */
	sd->satadev_type = nvp->nvp_type;
	DTRACE_PROBE1(nvp_type_h, int, nvp->nvp_type);

	mutex_exit(&nvp->nvp_mutex);

	return (SATA_SUCCESS);
}
1320 1324
1321 1325
1322 1326 /*
1323 1327 * Called by sata module to start a new command.
1324 1328 */
/*
 * SATA HBA tran_start entry point.  Validates the port state, then
 * dispatches the packet either synchronously (POLLING/SYNCH op modes)
 * via nv_start_sync() or asynchronously via nv_start_async().
 *
 * Rejection cases, checked in order under nvp_mutex:
 *   - port deactivated or failed        -> SATA_TRAN_PORT_ERROR
 *   - reset or link event in progress   -> SATA_TRAN_BUSY (during panic,
 *     the reset/timeout handlers are driven synchronously since
 *     timeouts cannot fire)
 *   - no/unknown device type            -> SATA_TRAN_PORT_ERROR
 *   - port multiplier device            -> SATA_TRAN_CMD_UNSUPPORTED
 *   - restore in progress (unless the packet is part of the restore,
 *     or the system is panicking)       -> SATA_TRAN_BUSY
 *   - abort in progress                 -> SATA_TRAN_BUSY
 */
static int
nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
{
	int cport = spkt->satapkt_device.satadev_addr.cport;
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	nv_port_t *nvp = &(nvc->nvc_port[cport]);
	int ret;

	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg);

	mutex_enter(&nvp->nvp_mutex);

	if (nvp->nvp_state & NV_DEACTIVATED) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_DEACTIVATED", NULL);
		DTRACE_PROBE(nvp_state_inactive_p);

		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	if (nvp->nvp_state & NV_FAILED) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_FAILED state", NULL);
		DTRACE_PROBE(nvp_state_failed_p);

		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	if (nvp->nvp_state & NV_RESET) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "still waiting for reset completion", NULL);
		DTRACE_PROBE(nvp_state_reset_p);

		spkt->satapkt_reason = SATA_PKT_BUSY;

		/*
		 * If in panic, timeouts do not occur, so invoke
		 * reset handling directly so that the signature
		 * can be acquired to complete the reset handling.
		 */
		if (ddi_in_panic()) {
			NVLOG(NVDBG_ERRS, nvc, nvp,
			    "nv_sata_start: calling nv_monitor_reset "
			    "synchronously", NULL);

			(void) nv_monitor_reset(nvp);
		}

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}

	if (nvp->nvp_state & NV_LINK_EVENT) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start(): link event ret bsy", NULL);
		DTRACE_PROBE(nvp_state_link_event_p);

		spkt->satapkt_reason = SATA_PKT_BUSY;

		/*
		 * as with NV_RESET above, drive event processing
		 * directly since timeouts cannot fire during panic
		 */
		if (ddi_in_panic()) {
			NVLOG(NVDBG_ERRS, nvc, nvp,
			    "nv_sata_start: calling nv_timeout "
			    "synchronously", NULL);

			nv_timeout(nvp);
		}

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}


	if ((nvp->nvp_type == SATA_DTYPE_NONE) ||
	    (nvp->nvp_type == SATA_DTYPE_UNKNOWN)) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: nvp_type 0x%x", nvp->nvp_type);
		DTRACE_PROBE1(not_ready_nvp_type_h, int, nvp->nvp_type);

		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {

		nv_cmn_err(CE_WARN, nvc, nvp,
		    "port multiplier not supported by controller");

		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_CMD_UNSUPPORTED);
	}

	/*
	 * after a device reset, and then when sata module restore processing
	 * is complete, the sata module will set sata_clear_dev_reset which
	 * indicates that restore processing has completed and normal
	 * non-restore related commands should be processed.
	 */
	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {

		NVLOG(NVDBG_RESET, nvc, nvp,
		    "nv_sata_start: clearing NV_RESTORE", NULL);
		DTRACE_PROBE(clearing_restore_p);
		DTRACE_PROBE1(nvp_state_before_clear_h, int, nvp->nvp_state);

		nvp->nvp_state &= ~NV_RESTORE;
	}

	/*
	 * if the device was recently reset as indicated by NV_RESTORE,
	 * only allow commands which restore device state. The sata module
	 * marks such commands with sata_ignore_dev_reset.
	 *
	 * during coredump, nv_reset is called but the restore isn't
	 * processed, so ignore the wait for restore if the system
	 * is panicing.
	 */
	if ((nvp->nvp_state & NV_RESTORE) &&
	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
	    (ddi_in_panic() == 0)) {

		NVLOG(NVDBG_RESET, nvc, nvp,
		    "nv_sata_start: waiting for restore ", NULL);
		DTRACE_PROBE1(restore_no_ignore_reset_nvp_state_h,
		    int, nvp->nvp_state);

		spkt->satapkt_reason = SATA_PKT_BUSY;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}

	if (nvp->nvp_state & NV_ABORTING) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_ABORTING", NULL);
		DTRACE_PROBE1(aborting_nvp_state_h, int, nvp->nvp_state);

		spkt->satapkt_reason = SATA_PKT_BUSY;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}

	/*
	 * record command sequence for debugging.
	 */
	nvp->nvp_seq++;

	DTRACE_PROBE2(command_start, int *, nvp, int,
	    spkt->satapkt_cmd.satacmd_cmd_reg);

	/*
	 * clear SError to be able to check errors after the command failure
	 */
	nv_put32(nvp->nvp_ctlp->nvc_bar_hdl[5], nvp->nvp_serror, 0xffffffff);

	if (spkt->satapkt_op_mode &
	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {

		ret = nv_start_sync(nvp, spkt);

		mutex_exit(&nvp->nvp_mutex);

		return (ret);
	}

	/*
	 * start command asynchronous command
	 */
	ret = nv_start_async(nvp, spkt);

	mutex_exit(&nvp->nvp_mutex);

	return (ret);
}
1522 1526
1523 1527
1524 1528 /*
1525 1529 * SATA_OPMODE_POLLING implies the driver is in a
1526 1530 * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1527 1531 * If only SATA_OPMODE_SYNCH is set, the driver can use
1528 1532 * interrupts and sleep wait on a cv.
1529 1533 *
1530 1534 * If SATA_OPMODE_POLLING is set, the driver can't use
1531 1535 * interrupts and must busy wait and simulate the
1532 1536 * interrupts by waiting for BSY to be cleared.
1533 1537 *
1534 1538 * Synchronous mode has to return BUSY if there are
1535 1539 * any other commands already on the drive.
1536 1540 */
1537 1541 static int
1538 1542 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1539 1543 {
1540 1544 nv_ctl_t *nvc = nvp->nvp_ctlp;
1541 1545 int ret;
1542 1546
1543 1547 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry",
1544 1548 NULL);
1545 1549
1546 1550 if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1547 1551 spkt->satapkt_reason = SATA_PKT_BUSY;
1548 1552 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1549 1553 "nv_sata_satapkt_sync: device is busy, sync cmd rejected"
1550 1554 "ncq_run: %d non_ncq_run: %d spkt: %p",
1551 1555 nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1552 1556 (&(nvp->nvp_slot[0]))->nvslot_spkt);
1553 1557
1554 1558 return (SATA_TRAN_BUSY);
1555 1559 }
1556 1560
1557 1561 /*
1558 1562 * if SYNC but not POLL, verify that this is not on interrupt thread.
1559 1563 */
1560 1564 if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1561 1565 servicing_interrupt()) {
1562 1566 spkt->satapkt_reason = SATA_PKT_BUSY;
1563 1567 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1564 1568 "SYNC mode not allowed during interrupt", NULL);
1565 1569
1566 1570 return (SATA_TRAN_BUSY);
1567 1571
1568 1572 }
1569 1573
1570 1574 /*
1571 1575 * disable interrupt generation if in polled mode
1572 1576 */
1573 1577 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1574 1578 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1575 1579 }
1576 1580
1577 1581 /*
1578 1582 * overload the satapkt_reason with BUSY so code below
1579 1583 * will know when it's done
1580 1584 */
1581 1585 spkt->satapkt_reason = SATA_PKT_BUSY;
1582 1586
1583 1587 if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1584 1588 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1585 1589 (*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1586 1590 }
1587 1591
1588 1592 return (ret);
1589 1593 }
1590 1594
1591 1595 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1592 1596 mutex_exit(&nvp->nvp_mutex);
1593 1597 ret = nv_poll_wait(nvp, spkt);
1594 1598 mutex_enter(&nvp->nvp_mutex);
1595 1599
1596 1600 (*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1597 1601
1598 1602 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1599 1603 " done % reason %d", ret);
1600 1604
1601 1605 return (ret);
1602 1606 }
1603 1607
1604 1608 /*
1605 1609 * non-polling synchronous mode handling. The interrupt will signal
1606 1610 * when device IO is completed.
1607 1611 */
1608 1612 while (spkt->satapkt_reason == SATA_PKT_BUSY) {
1609 1613 cv_wait(&nvp->nvp_sync_cv, &nvp->nvp_mutex);
1610 1614 }
1611 1615
1612 1616
1613 1617 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1614 1618 " done % reason %d", spkt->satapkt_reason);
1615 1619
1616 1620 return (SATA_TRAN_ACCEPTED);
1617 1621 }
1618 1622
1619 1623
/*
 * Busy-wait completion loop for SATA_OPMODE_POLLING commands.  Called
 * from nv_start_sync() with nvp_mutex dropped and port interrupts
 * disabled.  Repeatedly waits for BSY to clear and then calls the
 * controller interrupt handler directly to simulate the interrupt.
 *
 * On timeout (BSY never clears within satapkt_time) or an unclaimed
 * simulated interrupt, the port is reset and the packet completed with
 * SATA_PKT_TIMEOUT.  Always returns SATA_TRAN_ACCEPTED; the outcome is
 * conveyed through spkt->satapkt_reason.
 */
static int
nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
{
	int ret;
	nv_ctl_t *nvc = nvp->nvp_ctlp;
#if ! defined(__lock_lint)
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
#endif

	NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter", NULL);

	for (;;) {

		NV_DELAY_NSEC(400);

		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait",
		    NULL);
		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
			/* timed out waiting for BSY to clear */
			mutex_enter(&nvp->nvp_mutex);
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			nv_reset(nvp, "poll_wait");
			nv_complete_io(nvp, spkt, 0);
			mutex_exit(&nvp->nvp_mutex);
			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
			    "SATA_STATUS_BSY", NULL);

			return (SATA_TRAN_ACCEPTED);
		}

		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr",
		    NULL);

		/*
		 * Simulate interrupt.
		 */
		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr",
		    NULL);

		if (ret != DDI_INTR_CLAIMED) {
			/*
			 * handler saw nothing to service even though BSY
			 * cleared; treat as a timeout and reset the port
			 */
			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
			    " unclaimed -- resetting", NULL);
			mutex_enter(&nvp->nvp_mutex);
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			nv_reset(nvp, "poll_wait intr not claimed");
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_complete_io(nvp, spkt, 0);
			mutex_exit(&nvp->nvp_mutex);

			return (SATA_TRAN_ACCEPTED);
		}

#if ! defined(__lock_lint)
		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
			/*
			 * packet is complete
			 */
			return (SATA_TRAN_ACCEPTED);
		}
#endif
	}
	/*NOTREACHED*/
}
1685 1689
1686 1690
1687 1691 /*
1688 1692 * Called by sata module to abort outstanding packets.
1689 1693 */
1690 1694 /*ARGSUSED*/
1691 1695 static int
1692 1696 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1693 1697 {
1694 1698 int cport = spkt->satapkt_device.satadev_addr.cport;
1695 1699 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1696 1700 nv_port_t *nvp = &(nvc->nvc_port[cport]);
1697 1701 int c_a, ret;
1698 1702
1699 1703 ASSERT(cport < NV_MAX_PORTS(nvc));
1700 1704 NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt);
1701 1705
1702 1706 mutex_enter(&nvp->nvp_mutex);
1703 1707
1704 1708 if (nvp->nvp_state & NV_DEACTIVATED) {
1705 1709 mutex_exit(&nvp->nvp_mutex);
1706 1710 nv_cmn_err(CE_WARN, nvc, nvp,
1707 1711 "abort request failed: port inactive");
1708 1712
1709 1713 return (SATA_FAILURE);
1710 1714 }
1711 1715
1712 1716 /*
1713 1717 * spkt == NULL then abort all commands
1714 1718 */
1715 1719 c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED, B_TRUE);
1716 1720
1717 1721 if (c_a) {
1718 1722 NVLOG(NVDBG_ENTRY, nvc, nvp,
1719 1723 "packets aborted running=%d", c_a);
1720 1724 ret = SATA_SUCCESS;
1721 1725 } else {
1722 1726 if (spkt == NULL) {
1723 1727 NVLOG(NVDBG_ENTRY, nvc, nvp, "no spkts to abort", NULL);
1724 1728 } else {
1725 1729 NVLOG(NVDBG_ENTRY, nvc, nvp,
1726 1730 "can't find spkt to abort", NULL);
1727 1731 }
1728 1732 ret = SATA_FAILURE;
1729 1733 }
1730 1734
1731 1735 mutex_exit(&nvp->nvp_mutex);
1732 1736
1733 1737 return (ret);
1734 1738 }
1735 1739
1736 1740
1737 1741 /*
1738 1742 * if spkt == NULL abort all pkts running, otherwise
1739 1743 * abort the requested packet. must be called with nv_mutex
1740 1744 * held and returns with it held. Not NCQ aware.
1741 1745 */
/*
 * Abort outstanding packets on a port.  If spkt is NULL, every active
 * slot is aborted; otherwise only the slot holding spkt.  Each aborted
 * packet is completed with abort_reason.  When reset is B_TRUE the port
 * is reset (once) before the first abort.  Must be called with
 * nvp_mutex held; returns with it held.  Not NCQ aware.  Returns the
 * number of packets aborted.
 */
static int
nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
    boolean_t reset)
{
	int aborted = 0, i, reset_once = B_FALSE;
	struct nv_slot *nv_slotp;
	sata_pkt_t *spkt_slot;

	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));

	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active", NULL);

	/* flag the port so new commands are rejected while aborting */
	nvp->nvp_state |= NV_ABORTING;

	for (i = 0; i < nvp->nvp_queue_depth; i++) {

		nv_slotp = &(nvp->nvp_slot[i]);
		spkt_slot = nv_slotp->nvslot_spkt;

		/*
		 * skip if not active command in slot
		 */
		if (spkt_slot == NULL) {
			continue;
		}

		/*
		 * if a specific packet was requested, skip if
		 * this is not a match
		 */
		if ((spkt != NULL) && (spkt != spkt_slot)) {
			continue;
		}

		/*
		 * stop the hardware. This could need reworking
		 * when NCQ is enabled in the driver.
		 */
		if (reset_once == B_FALSE) {
			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;

			/*
			 * stop DMA engine
			 */
			nv_put8(bmhdl, nvp->nvp_bmicx, 0);

			/*
			 * Reset only if explicitly specified by the arg reset
			 */
			if (reset == B_TRUE) {
				reset_once = B_TRUE;
				nv_reset(nvp, "abort_active");
			}
		}

		spkt_slot->satapkt_reason = abort_reason;
		nv_complete_io(nvp, spkt_slot, i);
		aborted++;
	}

	nvp->nvp_state &= ~NV_ABORTING;

	return (aborted);
}
1806 1810
1807 1811
1808 1812 /*
1809 1813 * Called by sata module to reset a port, device, or the controller.
1810 1814 */
/*
 * SATA HBA tran_reset_dport entry point.  Resets a port or device
 * (SATA_ADDR_CPORT/SATA_ADDR_DCPORT); controller-wide and port
 * multiplier resets are not supported.  Returns SATA_SUCCESS for the
 * supported port/device cases, SATA_FAILURE otherwise.
 *
 * During panic, reset completion is driven synchronously via
 * nv_monitor_reset() since timeout(9F) callbacks cannot fire.
 */
static int
nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
{
	int cport = sd->satadev_addr.cport;
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	nv_port_t *nvp = &(nvc->nvc_port[cport]);
	int ret = SATA_FAILURE;

	ASSERT(cport < NV_MAX_PORTS(nvc));

	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_reset", NULL);

	mutex_enter(&nvp->nvp_mutex);

	switch (sd->satadev_addr.qual) {

	case SATA_ADDR_CPORT:
		/*FALLTHROUGH*/
	case SATA_ADDR_DCPORT:

		ret = SATA_SUCCESS;

		/*
		 * If a reset is already in progress, don't disturb it
		 */
		if ((nvp->nvp_state & (NV_RESET|NV_RESTORE)) &&
		    (ddi_in_panic() == 0)) {
			NVLOG(NVDBG_RESET, nvc, nvp,
			    "nv_sata_reset: reset already in progress", NULL);
			DTRACE_PROBE(reset_already_in_progress_p);

			break;
		}

		/*
		 * log the pre-reset state of the driver because dumping the
		 * blocks will disturb it.
		 */
		if (ddi_in_panic() == 1) {
			NVLOG(NVDBG_RESET, nvc, nvp, "in_panic. nvp_state: "
			    "0x%x nvp_reset_time: %d nvp_last_cmd: 0x%x "
			    "nvp_previous_cmd: 0x%x nvp_reset_count: %d "
			    "nvp_first_reset_reason: %s "
			    "nvp_reset_reason: %s nvp_seq: %d "
			    "in_interrupt: %d", nvp->nvp_state,
			    nvp->nvp_reset_time, nvp->nvp_last_cmd,
			    nvp->nvp_previous_cmd, nvp->nvp_reset_count,
			    nvp->nvp_first_reset_reason,
			    nvp->nvp_reset_reason, nvp->nvp_seq,
			    servicing_interrupt());
		}

		nv_reset(nvp, "sata_reset");

		/* complete outstanding packets; port was already reset */
		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET, B_FALSE);

		/*
		 * If the port is inactive, do a quiet reset and don't attempt
		 * to wait for reset completion or do any post reset processing
		 *
		 */
		if (nvp->nvp_state & NV_DEACTIVATED) {
			nvp->nvp_state &= ~NV_RESET;
			nvp->nvp_reset_time = 0;

			break;
		}

		/*
		 * clear the port failed flag. It will get set again
		 * if the port is still not functioning.
		 */
		nvp->nvp_state &= ~NV_FAILED;

		/*
		 * timeouts are not available while the system is
		 * dropping core, so call nv_monitor_reset() directly
		 */
		if (ddi_in_panic() != 0) {
			while (nvp->nvp_state & NV_RESET) {
				drv_usecwait(1000);
				(void) nv_monitor_reset(nvp);
			}

			break;
		}

		break;
	case SATA_ADDR_CNTRL:
		NVLOG(NVDBG_ENTRY, nvc, nvp,
		    "nv_sata_reset: controller reset not supported", NULL);

		break;
	case SATA_ADDR_PMPORT:
	case SATA_ADDR_DPMPORT:
		NVLOG(NVDBG_ENTRY, nvc, nvp,
		    "nv_sata_reset: port multipliers not supported", NULL);
		/*FALLTHROUGH*/
	default:
		/*
		 * unsupported case
		 */
		break;
	}

	mutex_exit(&nvp->nvp_mutex);

	return (ret);
}
1920 1924
1921 1925
1922 1926 /*
1923 1927 * Sata entry point to handle port activation. cfgadm -c connect
1924 1928 */
1925 1929 static int
1926 1930 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1927 1931 {
1928 1932 int cport = sd->satadev_addr.cport;
1929 1933 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1930 1934 nv_port_t *nvp = &(nvc->nvc_port[cport]);
1931 1935 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1932 1936 uint32_t sstatus;
1933 1937
1934 1938 ASSERT(cport < NV_MAX_PORTS(nvc));
1935 1939 NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_activate", NULL);
1936 1940
1937 1941 mutex_enter(&nvp->nvp_mutex);
1938 1942
1939 1943 sd->satadev_state = SATA_STATE_READY;
1940 1944
1941 1945 nv_copy_registers(nvp, sd, NULL);
1942 1946
1943 1947 (*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1944 1948
1945 1949 /*
1946 1950 * initiate link probing and device signature acquisition
1947 1951 */
1948 1952
1949 1953 bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1950 1954
1951 1955 sstatus = ddi_get32(bar5_hdl, nvp->nvp_sstatus);
1952 1956
1953 1957 nvp->nvp_type = SATA_DTYPE_NONE;
1954 1958 nvp->nvp_signature = NV_NO_SIG;
1955 1959 nvp->nvp_state &= ~NV_DEACTIVATED;
1956 1960
1957 1961 if (SSTATUS_GET_DET(sstatus) ==
1958 1962 SSTATUS_DET_DEVPRE_PHYCOM) {
1959 1963
1960 1964 nvp->nvp_state |= NV_ATTACH;
1961 1965 nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1962 1966 nv_reset(nvp, "sata_activate");
1963 1967
1964 1968 while (nvp->nvp_state & NV_RESET) {
1965 1969 cv_wait(&nvp->nvp_reset_cv, &nvp->nvp_mutex);
1966 1970 }
1967 1971
1968 1972 }
1969 1973
1970 1974 mutex_exit(&nvp->nvp_mutex);
1971 1975
1972 1976 return (SATA_SUCCESS);
1973 1977 }
1974 1978
1975 1979
1976 1980 /*
1977 1981 * Sata entry point to handle port deactivation. cfgadm -c disconnect
1978 1982 */
/*
 * SATA HBA entry point to handle port deactivation (cfgadm -c
 * disconnect).  Aborts all outstanding packets (without resetting the
 * port), marks the port NV_DEACTIVATED so new commands are rejected,
 * disables port interrupts, and reports the shutdown state back
 * through sd.  Always returns SATA_SUCCESS.
 */
static int
nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
{
	int cport = sd->satadev_addr.cport;
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	nv_port_t *nvp = &(nvc->nvc_port[cport]);

	ASSERT(cport < NV_MAX_PORTS(nvc));
	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate", NULL);

	mutex_enter(&nvp->nvp_mutex);

	(void) nv_abort_active(nvp, NULL, SATA_PKT_ABORTED, B_FALSE);

	/*
	 * make the device inaccessible
	 */
	nvp->nvp_state |= NV_DEACTIVATED;

	/*
	 * disable the interrupts on port
	 */
	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);

	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
	nv_copy_registers(nvp, sd, NULL);

	mutex_exit(&nvp->nvp_mutex);

	return (SATA_SUCCESS);
}
2010 2014
2011 2015
2012 2016 /*
2013 2017 * find an empty slot in the driver's queue, increment counters,
2014 2018 * and then invoke the appropriate PIO or DMA start routine.
2015 2019 */
/*
 * Find an empty slot in the driver's queue, set up the slot for this
 * packet, then invoke the appropriate PIO or DMA start routine.
 * Returns SATA_TRAN_ACCEPTED on success, or the start routine's / an
 * unsupported-command error code on failure (with counters unwound).
 * Caller is expected to hold nvp_mutex and to have verified queue room.
 */
static int
nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
{
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	nv_slot_t *nv_slotp;
	boolean_t dma_cmd;

	NVLOG(NVDBG_DELIVER, nvc, nvp, "nv_start_common entered: cmd: 0x%x",
	    sata_cmdp->satacmd_cmd_reg);

	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
		nvp->nvp_ncq_run++;
		/*
		 * search for an empty NCQ slot. by the time, it's already
		 * been determined by the caller that there is room on the
		 * queue.
		 */
		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
		    on_bit <<= 1) {
			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
				break;
			}
		}

		/*
		 * the first empty slot found, should not exceed the queue
		 * depth of the drive. if it does it's an error.
		 */
		ASSERT(slot != nvp->nvp_queue_depth);

		/*
		 * hardware SACTIVE must agree with the cached copy before
		 * the slot's bit is set; then mark the tag busy both in
		 * the controller register and in the cache.
		 */
		sactive = nv_get32(nvc->nvc_bar_hdl[5],
		    nvp->nvp_sactive);
		ASSERT((sactive & on_bit) == 0);
		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
		NVLOG(NVDBG_DELIVER, nvc, nvp, "setting SACTIVE onbit: %X",
		    on_bit);
		nvp->nvp_sactive_cache |= on_bit;

		ncq = NVSLOT_NCQ;

	} else {
		/* non-NCQ commands always use slot 0 */
		nvp->nvp_non_ncq_run++;
		slot = 0;
	}

	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];

	/* the chosen slot must be free */
	ASSERT(nv_slotp->nvslot_spkt == NULL);

	nv_slotp->nvslot_spkt = spkt;
	nv_slotp->nvslot_flags = ncq;

	/*
	 * the sata module doesn't indicate which commands utilize the
	 * DMA engine, so find out using this switch table.
	 */
	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
	case SATAC_READ_DMA_EXT:
	case SATAC_WRITE_DMA_EXT:
	case SATAC_WRITE_DMA:
	case SATAC_READ_DMA:
	case SATAC_READ_DMA_QUEUED:
	case SATAC_READ_DMA_QUEUED_EXT:
	case SATAC_WRITE_DMA_QUEUED:
	case SATAC_WRITE_DMA_QUEUED_EXT:
	case SATAC_READ_FPDMA_QUEUED:
	case SATAC_WRITE_FPDMA_QUEUED:
	case SATAC_DSM:
		dma_cmd = B_TRUE;
		break;
	default:
		dma_cmd = B_FALSE;
	}

	/*
	 * select start/interrupt handler pair based on transfer style:
	 * DMA, ATAPI packet PIO, non-data, PIO in, or PIO out.
	 */
	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "DMA command", NULL);
		nv_slotp->nvslot_start = nv_start_dma;
		nv_slotp->nvslot_intr = nv_intr_dma;
	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "packet command", NULL);
		nv_slotp->nvslot_start = nv_start_pkt_pio;
		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
		if ((direction == SATA_DIR_READ) ||
		    (direction == SATA_DIR_WRITE)) {
			nv_slotp->nvslot_byte_count =
			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
			nv_slotp->nvslot_v_addr =
			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
			/*
			 * Freeing DMA resources allocated by the sata common
			 * module to avoid buffer overwrite (dma sync) problems
			 * when the buffer is released at command completion.
			 * Primarily an issue on systems with more than
			 * 4GB of memory.
			 */
			sata_free_dma_resources(spkt);
		}
	} else if (direction == SATA_DIR_NODATA_XFER) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "non-data command", NULL);
		nv_slotp->nvslot_start = nv_start_nodata;
		nv_slotp->nvslot_intr = nv_intr_nodata;
	} else if (direction == SATA_DIR_READ) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio in command", NULL);
		nv_slotp->nvslot_start = nv_start_pio_in;
		nv_slotp->nvslot_intr = nv_intr_pio_in;
		nv_slotp->nvslot_byte_count =
		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
		nv_slotp->nvslot_v_addr =
		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
		/*
		 * Freeing DMA resources allocated by the sata common module to
		 * avoid buffer overwrite (dma sync) problems when the buffer
		 * is released at command completion. This is not an issue
		 * for write because write does not update the buffer.
		 * Primarily an issue on systems with more than 4GB of memory.
		 */
		sata_free_dma_resources(spkt);
	} else if (direction == SATA_DIR_WRITE) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio out command", NULL);
		nv_slotp->nvslot_start = nv_start_pio_out;
		nv_slotp->nvslot_intr = nv_intr_pio_out;
		nv_slotp->nvslot_byte_count =
		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
		nv_slotp->nvslot_v_addr =
		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
	} else {
		/* no handler matched: reject the packet and unwind */
		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
		    " %d cookies %d cmd %x",
		    sata_cmdp->satacmd_flags.sata_data_direction,
		    sata_cmdp->satacmd_num_dma_cookies, cmd);
		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
		ret = SATA_TRAN_CMD_UNSUPPORTED;

		goto fail;
	}

	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
	    SATA_TRAN_ACCEPTED) {
#ifdef SGPIO_SUPPORT
		nv_sgp_drive_active(nvp->nvp_ctlp,
		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
#endif
		nv_slotp->nvslot_stime = ddi_get_lbolt();

		/*
		 * start timer if it's not already running and this packet
		 * is not requesting polled mode.
		 */
		if ((nvp->nvp_timeout_id == 0) &&
		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
			nv_setup_timeout(nvp, NV_ONE_SEC);
		}

		/* remember the last two commands issued, for debugging */
		nvp->nvp_previous_cmd = nvp->nvp_last_cmd;
		nvp->nvp_last_cmd = spkt->satapkt_cmd.satacmd_cmd_reg;

		return (SATA_TRAN_ACCEPTED);
	}

	fail:

	/* undo counter increments and release the slot taken above */
	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;

	if (ncq == NVSLOT_NCQ) {
		nvp->nvp_ncq_run--;
		nvp->nvp_sactive_cache &= ~on_bit;
	} else {
		nvp->nvp_non_ncq_run--;
	}
	nv_slotp->nvslot_spkt = NULL;
	nv_slotp->nvslot_flags = 0;

	return (ret);
}
2195 2199
2196 2200
2197 2201 /*
2198 2202 * Check if the signature is ready and if non-zero translate
2199 2203 * it into a solaris sata defined type.
2200 2204 */
2201 2205 static void
2202 2206 nv_read_signature(nv_port_t *nvp)
2203 2207 {
2204 2208 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2205 2209 int retry_count = 0;
2206 2210
2207 2211 retry:
2208 2212
2209 2213 nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2210 2214 nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2211 2215 nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2212 2216 nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2213 2217
2214 2218 NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2215 2219 "nv_read_signature: 0x%x ", nvp->nvp_signature);
2216 2220
2217 2221 switch (nvp->nvp_signature) {
2218 2222
2219 2223 case NV_DISK_SIG:
2220 2224 NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp, "drive is a disk", NULL);
2221 2225 DTRACE_PROBE(signature_is_disk_device_p)
2222 2226 nvp->nvp_type = SATA_DTYPE_ATADISK;
2223 2227
2224 2228 break;
2225 2229 case NV_ATAPI_SIG:
2226 2230 NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2227 2231 "drive is an optical device", NULL);
2228 2232 DTRACE_PROBE(signature_is_optical_device_p)
2229 2233 nvp->nvp_type = SATA_DTYPE_ATAPICD;
2230 2234 break;
2231 2235 case NV_PM_SIG:
2232 2236 NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2233 2237 "device is a port multiplier", NULL);
2234 2238 DTRACE_PROBE(signature_is_port_multiplier_p)
2235 2239 nvp->nvp_type = SATA_DTYPE_PMULT;
2236 2240 break;
2237 2241 case NV_NO_SIG:
2238 2242 NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2239 2243 "signature not available", NULL);
2240 2244 DTRACE_PROBE(sig_not_available_p);
2241 2245 nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2242 2246 break;
2243 2247 default:
2244 2248 if (retry_count++ == 0) {
2245 2249 /*
2246 2250 * this is a rare corner case where the controller
2247 2251 * is updating the task file registers as the driver
2248 2252 * is reading them. If this happens, wait a bit and
2249 2253 * retry once.
2250 2254 */
2251 2255 NV_DELAY_NSEC(1000000);
2252 2256 NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2253 2257 "invalid signature 0x%x retry once",
2254 2258 nvp->nvp_signature);
2255 2259 DTRACE_PROBE1(signature_invalid_retry_once_h,
2256 2260 int, nvp->nvp_signature);
2257 2261
2258 2262 goto retry;
2259 2263 }
2260 2264
2261 2265 nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp,
2262 2266 "invalid signature 0x%x", nvp->nvp_signature);
2263 2267 nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2264 2268
2265 2269 break;
2266 2270 }
2267 2271 }
2268 2272
2269 2273
2270 2274 /*
2271 2275 * Set up a new timeout or complete a timeout in microseconds.
2272 2276 * If microseconds is zero, no new timeout is scheduled. Must be
2273 2277 * called at the end of the timeout routine.
2274 2278 */
static void
nv_setup_timeout(nv_port_t *nvp, clock_t microseconds)
{
	/* duration of the timeout currently pending, 0 if mid-update */
	clock_t old_duration = nvp->nvp_timeout_duration;

	/* a zero request means "no new timeout"; nothing to do */
	if (microseconds == 0) {

		return;
	}

	if (nvp->nvp_timeout_id != 0 && nvp->nvp_timeout_duration == 0) {
		/*
		 * Since we are dropping the mutex for untimeout,
		 * the timeout may be executed while we are trying to
		 * untimeout and setting up a new timeout.
		 * If nvp_timeout_duration is 0, then this function
		 * was re-entered. Just exit.
		 */
		cmn_err(CE_WARN, "nv_setup_timeout re-entered");

		return;
	}

	/* mark "update in progress" so a re-entry can be detected above */
	nvp->nvp_timeout_duration = 0;

	if (nvp->nvp_timeout_id == 0) {
		/*
		 * start new timer
		 */
		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
		    drv_usectohz(microseconds));
	} else {
		/*
		 * If the currently running timeout is due later than the
		 * requested one, restart it with a new expiration.
		 * Our timeouts do not need to be accurate - we would be just
		 * checking that the specified time was exceeded.
		 */
		if (old_duration > microseconds) {
			/* untimeout(9F) cannot be called with mutex held */
			mutex_exit(&nvp->nvp_mutex);
			(void) untimeout(nvp->nvp_timeout_id);
			mutex_enter(&nvp->nvp_mutex);
			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
			    drv_usectohz(microseconds));
		}
	}

	nvp->nvp_timeout_duration = microseconds;
}
2324 2328
2325 2329
2326 2330
2327 2331 int nv_reset_length = NV_RESET_LENGTH;
2328 2332
2329 2333 /*
2330 2334 * Reset the port
2331 2335 */
2332 2336 static void
2333 2337 nv_reset(nv_port_t *nvp, char *reason)
2334 2338 {
2335 2339 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2336 2340 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2337 2341 nv_ctl_t *nvc = nvp->nvp_ctlp;
2338 2342 uint32_t sctrl, serr, sstatus;
2339 2343 uint8_t bmicx;
2340 2344 int i, j;
2341 2345 boolean_t reset_success = B_FALSE;
2342 2346
2343 2347 ASSERT(mutex_owned(&nvp->nvp_mutex));
2344 2348
2345 2349 /*
2346 2350 * If the port is reset right after the controller receives
2347 2351 * the DMA activate command (or possibly any other FIS),
2348 2352 * controller operation freezes without any known recovery
2349 2353 * procedure. Until Nvidia advises on a recovery mechanism,
2350 2354 * avoid the situation by waiting sufficiently long to
2351 2355 * ensure the link is not actively transmitting any FIS.
2352 2356 * 100ms was empirically determined to be large enough to
2353 2357 * ensure no transaction was left in flight but not too long
2354 2358 * as to cause any significant thread delay.
2355 2359 */
2356 2360 drv_usecwait(100000);
2357 2361
2358 2362 serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2359 2363 DTRACE_PROBE1(serror_h, int, serr);
2360 2364
2361 2365 /*
2362 2366 * stop DMA engine.
2363 2367 */
2364 2368 bmicx = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmicx);
2365 2369 nv_put8(nvp->nvp_bm_hdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);
2366 2370
2367 2371 /*
2368 2372 * the current setting of the NV_RESET in nvp_state indicates whether
2369 2373 * this is the first reset attempt or a retry.
2370 2374 */
2371 2375 if (nvp->nvp_state & NV_RESET) {
2372 2376 nvp->nvp_reset_retry_count++;
2373 2377
2374 2378 NVLOG(NVDBG_RESET, nvc, nvp, "npv_reset_retry_count: %d",
2375 2379 nvp->nvp_reset_retry_count);
2376 2380
2377 2381 } else {
2378 2382 nvp->nvp_reset_retry_count = 0;
2379 2383 nvp->nvp_reset_count++;
2380 2384 nvp->nvp_state |= NV_RESET;
2381 2385
2382 2386 NVLOG(NVDBG_RESET, nvc, nvp, "nvp_reset_count: %d reason: %s "
2383 2387 "serror: 0x%x seq: %d run: %d cmd: 0x%x",
2384 2388 nvp->nvp_reset_count, reason, serr, nvp->nvp_seq,
2385 2389 nvp->nvp_non_ncq_run, nvp->nvp_last_cmd);
2386 2390 }
2387 2391
2388 2392 /*
2389 2393 * a link event could have occurred slightly before excessive
2390 2394 * interrupt processing invokes a reset. Reset handling overrides
2391 2395 * link event processing so it's safe to clear it here.
2392 2396 */
2393 2397 nvp->nvp_state &= ~(NV_RESTORE|NV_LINK_EVENT);
2394 2398
2395 2399 nvp->nvp_reset_time = ddi_get_lbolt();
2396 2400
2397 2401 if ((nvp->nvp_state & (NV_ATTACH|NV_HOTPLUG)) == 0) {
2398 2402 nv_cmn_err(CE_NOTE, nvc, nvp, "nv_reset: reason: %s serr 0x%x"
2399 2403 " nvp_state: 0x%x", reason, serr, nvp->nvp_state);
2400 2404 /*
2401 2405 * keep a record of why the first reset occurred, for debugging
2402 2406 */
2403 2407 if (nvp->nvp_first_reset_reason[0] == '\0') {
2404 2408 (void) strncpy(nvp->nvp_first_reset_reason,
2405 2409 reason, NV_REASON_LEN);
2406 2410 nvp->nvp_first_reset_reason[NV_REASON_LEN - 1] = '\0';
2407 2411 }
2408 2412 }
2409 2413
2410 2414 (void) strncpy(nvp->nvp_reset_reason, reason, NV_REASON_LEN);
2411 2415
2412 2416 /*
2413 2417 * ensure there is terminating NULL
2414 2418 */
2415 2419 nvp->nvp_reset_reason[NV_REASON_LEN - 1] = '\0';
2416 2420
2417 2421 /*
2418 2422 * Issue hardware reset; retry if necessary.
2419 2423 */
2420 2424 for (i = 0; i < NV_COMRESET_ATTEMPTS; i++) {
2421 2425
2422 2426 /*
2423 2427 * clear signature registers and the error register too
2424 2428 */
2425 2429 nv_put8(cmdhdl, nvp->nvp_sect, 0);
2426 2430 nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2427 2431 nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2428 2432 nv_put8(cmdhdl, nvp->nvp_count, 0);
2429 2433
2430 2434 nv_put8(nvp->nvp_cmd_hdl, nvp->nvp_error, 0);
2431 2435
2432 2436 /*
2433 2437 * assert reset in PHY by writing a 1 to bit 0 scontrol
2434 2438 */
2435 2439 sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2436 2440
2437 2441 nv_put32(bar5_hdl, nvp->nvp_sctrl,
2438 2442 sctrl | SCONTROL_DET_COMRESET);
2439 2443
2440 2444 /* Wait at least 1ms, as required by the spec */
2441 2445 drv_usecwait(nv_reset_length);
2442 2446
2443 2447 serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2444 2448 DTRACE_PROBE1(aftercomreset_serror_h, int, serr);
2445 2449
2446 2450 /* Reset all accumulated error bits */
2447 2451 nv_put32(bar5_hdl, nvp->nvp_serror, 0xffffffff);
2448 2452
2449 2453
2450 2454 sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2451 2455 sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2452 2456 NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset: applied (%d); "
2453 2457 "sctrl 0x%x, sstatus 0x%x", i, sctrl, sstatus);
2454 2458
2455 2459 /* de-assert reset in PHY */
2456 2460 nv_put32(bar5_hdl, nvp->nvp_sctrl,
2457 2461 sctrl & ~SCONTROL_DET_COMRESET);
2458 2462
2459 2463 /*
2460 2464 * Wait up to 10ms for COMINIT to arrive, indicating that
2461 2465 * the device recognized COMRESET.
2462 2466 */
2463 2467 for (j = 0; j < 10; j++) {
2464 2468 drv_usecwait(NV_ONE_MSEC);
2465 2469 sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2466 2470 if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2467 2471 (SSTATUS_GET_DET(sstatus) ==
2468 2472 SSTATUS_DET_DEVPRE_PHYCOM)) {
2469 2473 reset_success = B_TRUE;
2470 2474 break;
2471 2475 }
2472 2476 }
2473 2477
2474 2478 if (reset_success == B_TRUE)
2475 2479 break;
2476 2480 }
2477 2481
2478 2482
2479 2483 serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2480 2484 DTRACE_PROBE1(last_serror_h, int, serr);
2481 2485
2482 2486 if (reset_success == B_FALSE) {
2483 2487 NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset not succeeded "
2484 2488 "after %d attempts. serr: 0x%x", i, serr);
2485 2489 } else {
2486 2490 NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset succeeded"
↓ open down ↓ |
1870 lines elided |
↑ open up ↑ |
2487 2491 " after %dms. serr: 0x%x", TICK_TO_MSEC(ddi_get_lbolt() -
2488 2492 nvp->nvp_reset_time), serr);
2489 2493 }
2490 2494
2491 2495 nvp->nvp_wait_sig = NV_WAIT_SIG;
2492 2496 nv_setup_timeout(nvp, nvp->nvp_wait_sig);
2493 2497 }
2494 2498
2495 2499
/*
 * Initialize register handling specific to mcp51/mcp55/mcp61
 */
/* ARGSUSED */
static void
mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
{
	nv_port_t *nvp;
	uchar_t *bar5 = nvc->nvc_bar_addr[5];
	uint8_t off, port;

	/* locate the mcp5x control and NCQ registers within BAR5 */
	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);

	/* per-port interrupt status/control registers are 2 bytes apart */
	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
		nvp = &(nvc->nvc_port[port]);
		nvp->nvp_mcp5x_int_status =
		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
		nvp->nvp_mcp5x_int_ctl =
		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);

		/*
		 * clear any previous interrupts asserted
		 */
		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
		    MCP5X_INT_CLEAR);

		/*
		 * These are the interrupts to accept for now. The spec
		 * says these are enable bits, but nvidia has indicated
		 * these are masking bits. Even though they may be masked
		 * out to prevent asserting the main interrupt, they can
		 * still be asserted while reading the interrupt status
		 * register, so that needs to be considered in the interrupt
		 * handler.
		 */
		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
		    ~(MCP5X_INT_IGNORE));
	}

	/*
	 * Allow the driver to program the BM on the first command instead
	 * of waiting for an interrupt.
	 */
#ifdef NCQ
	/*
	 * NOTE(review): 'flags' is presumably declared under #ifdef NCQ
	 * elsewhere in this file; not visible here — confirm before
	 * building with NCQ defined.
	 */
	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
#endif

	/*
	 * mcp55 rev A03 and above supports 40-bit physical addressing.
	 * Enable DMA to take advantage of that.
	 *
	 * Note: 'nvp' below still points at the last port from the loop
	 * above; it is used only for logging context.
	 */
	if ((nvc->nvc_devid > 0x37f) ||
	    ((nvc->nvc_devid == 0x37f) && (nvc->nvc_revid >= 0xa3))) {
		if (nv_sata_40bit_dma == B_TRUE) {
			uint32_t reg32;
			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
			    "devid is %X revid is %X. 40-bit DMA"
			    " addressing enabled", nvc->nvc_devid,
			    nvc->nvc_revid);
			nvc->dma_40bit = B_TRUE;

			/* turn on 40-bit PRD addressing in config space */
			reg32 = pci_config_get32(pci_conf_handle,
			    NV_SATA_CFG_20);
			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
			    reg32 | NV_40BIT_PRD);

			/*
			 * CFG_23 bits 0-7 contain the top 8 bits (of 40
			 * bits) for the primary PRD table, and bits 8-15
			 * contain the top 8 bits for the secondary. Set
			 * to zero because the DMA attribute table for PRD
			 * allocation forces it into 32 bit address space
			 * anyway.
			 */
			reg32 = pci_config_get32(pci_conf_handle,
			    NV_SATA_CFG_23);
			pci_config_put32(pci_conf_handle, NV_SATA_CFG_23,
			    reg32 & 0xffff0000);
		} else {
			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
			    "40-bit DMA disabled by nv_sata_40bit_dma", NULL);
		}
	} else {
		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "devid is %X revid is"
		    " %X. Not capable of 40-bit DMA addressing",
		    nvc->nvc_devid, nvc->nvc_revid);
	}
}
2586 2593
2587 2594
2588 2595 /*
2589 2596 * Initialize register handling specific to ck804
2590 2597 */
static void
ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
{
	uchar_t *bar5 = nvc->nvc_bar_addr[5];
	uint32_t reg32;
	uint16_t reg16;
	nv_port_t *nvp;
	int j;

	/*
	 * delay hotplug interrupts until PHYRDY.
	 */
	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);

	/*
	 * enable hot plug interrupts for channel x and y
	 * (read-modify-write of each channel's ADMA control register)
	 */
	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
	    (uint16_t *)(bar5 + NV_ADMACTL_X));
	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
	    NV_HIRQ_EN | reg16);


	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
	    NV_HIRQ_EN | reg16);

	/* record the shared ck804 interrupt status register location */
	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);

	/*
	 * clear any existing interrupt pending then enable
	 */
	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
		nvp = &(nvc->nvc_port[j]);
		mutex_enter(&nvp->nvp_mutex);
		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
		mutex_exit(&nvp->nvp_mutex);
	}
}
2634 2641
↓ open down ↓ |
41 lines elided |
↑ open up ↑ |
2635 2642
2636 2643 /*
2637 2644 * Initialize the controller and set up driver data structures.
2638 2645 * determine if ck804 or mcp5x class.
2639 2646 */
static int
nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
{
	struct sata_hba_tran stran;
	nv_port_t *nvp;
	int j;
	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uchar_t *bar5 = nvc->nvc_bar_addr[5];
	uint32_t reg32;
	uint8_t reg8, reg8_save;

	NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl entered", NULL);

	/* assume ck804-class until the probe below proves otherwise */
	nvc->nvc_mcp5x_flag = B_FALSE;

	/*
	 * Need to set bit 2 to 1 at config offset 0x50
	 * to enable access to the bar5 registers.
	 */
	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
	if (!(reg32 & NV_BAR5_SPACE_EN)) {
		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
		    reg32 | NV_BAR5_SPACE_EN);
	}

	/*
	 * Determine if this is ck804 or mcp5x. ck804 will map in the
	 * task file registers into bar5 while mcp5x won't. The offset of
	 * the task file registers in mcp5x's space is unused, so it will
	 * return zero. So check one of the task file registers to see if it is
	 * writable and reads back what was written. If it's mcp5x it will
	 * return back 0xff whereas ck804 will return the value written.
	 */
	reg8_save = nv_get8(bar5_hdl,
	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));


	for (j = 1; j < 3; j++) {

		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
		reg8 = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));

		if (reg8 != j) {
			nvc->nvc_mcp5x_flag = B_TRUE;
			break;
		}
	}

	/* restore the probed register to its original value */
	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);

	/* select the chip-specific interrupt/init/intr-control vectors */
	if (nvc->nvc_mcp5x_flag == B_FALSE) {
		NVLOG(NVDBG_INIT, nvc, NULL, "controller is CK804/MCP04",
		    NULL);
		nvc->nvc_interrupt = ck804_intr;
		nvc->nvc_reg_init = ck804_reg_init;
		nvc->nvc_set_intr = ck804_set_intr;
	} else {
		NVLOG(NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55/MCP61",
		    NULL);
		nvc->nvc_interrupt = mcp5x_intr;
		nvc->nvc_reg_init = mcp5x_reg_init;
		nvc->nvc_set_intr = mcp5x_set_intr;
	}


	/* fill in the SATA framework transport vector for this HBA */
	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
	stran.sata_tran_hba_dip = nvc->nvc_dip;
	stran.sata_tran_hba_num_cports = NV_NUM_PORTS;
	stran.sata_tran_hba_features_support =
	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
	stran.sata_tran_probe_port = nv_sata_probe;
	stran.sata_tran_start = nv_sata_start;
	stran.sata_tran_abort = nv_sata_abort;
	stran.sata_tran_reset_dport = nv_sata_reset;
	stran.sata_tran_selftest = NULL;
	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
	stran.sata_tran_pwrmgt_ops = NULL;
	stran.sata_tran_ioctl = NULL;
	nvc->nvc_sata_hba_tran = stran;

	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
	    KM_SLEEP);

	/*
	 * initialize registers common to all chipsets
	 */
	nv_common_reg_init(nvc);

	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
		nvp = &(nvc->nvc_port[j]);

		cmd_addr = nvp->nvp_cmd_addr;
		ctl_addr = nvp->nvp_ctl_addr;
		bm_addr = nvp->nvp_bm_addr;

		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(nvc->nvc_intr_pri));

		cv_init(&nvp->nvp_sync_cv, NULL, CV_DRIVER, NULL);
		cv_init(&nvp->nvp_reset_cv, NULL, CV_DRIVER, NULL);

		/* per-port task file register addresses */
		nvp->nvp_data = cmd_addr + NV_DATA;
		nvp->nvp_error = cmd_addr + NV_ERROR;
		nvp->nvp_feature = cmd_addr + NV_FEATURE;
		nvp->nvp_count = cmd_addr + NV_COUNT;
		nvp->nvp_sect = cmd_addr + NV_SECT;
		nvp->nvp_lcyl = cmd_addr + NV_LCYL;
		nvp->nvp_hcyl = cmd_addr + NV_HCYL;
		nvp->nvp_drvhd = cmd_addr + NV_DRVHD;
		nvp->nvp_status = cmd_addr + NV_STATUS;
		nvp->nvp_cmd = cmd_addr + NV_CMD;
		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
		nvp->nvp_devctl = ctl_addr + NV_DEVCTL;

		/* per-port bus master (DMA) register addresses */
		nvp->nvp_bmicx = bm_addr + BMICX_REG;
		nvp->nvp_bmisx = bm_addr + BMISX_REG;
		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);

		nvp->nvp_state = 0;

		/*
		 * Initialize dma handles, etc.
		 * If it fails, the port is in inactive state.
		 */
		nv_init_port(nvp);
	}

	/*
	 * initialize register by calling chip specific reg initialization
	 */
	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);

	/* initialize the hba dma attribute */
	if (nvc->dma_40bit == B_TRUE)
		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
		    &buffer_dma_40bit_attr;
	else
		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
		    &buffer_dma_attr;

	return (NV_SUCCESS);
}
2787 2792
2788 2793
2789 2794 /*
2790 2795 * Initialize data structures with enough slots to handle queuing, if
2791 2796 * enabled. NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2792 2797 * NCQ support is built into the driver and enabled. It might have been
2793 2798 * better to derive the true size from the drive itself, but the sata
2794 2799 * module only sends down that information on the first NCQ command,
2795 2800 * which means possibly re-sizing the structures on an interrupt stack,
2796 2801 * making error handling more messy. The easy way is to just allocate
2797 2802 * all 32 slots, which is what most drives support anyway.
2798 2803 */
static void
nv_init_port(nv_port_t *nvp)
{
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	size_t prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
	dev_info_t *dip = nvc->nvc_dip;
	ddi_device_acc_attr_t dev_attr;
	size_t buf_size;
	ddi_dma_cookie_t cookie;
	uint_t count;
	int rc, i;

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* per-slot arrays: DMA handle, access handle, vaddr, paddr */
	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
	    NV_QUEUE_SLOTS, KM_SLEEP);

	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
	    NV_QUEUE_SLOTS, KM_SLEEP);

	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
	    NV_QUEUE_SLOTS, KM_SLEEP);

	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
	    NV_QUEUE_SLOTS, KM_SLEEP);

	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
	    KM_SLEEP);

	/*
	 * allocate and bind a PRD (scatter/gather) table per slot; on any
	 * failure, nv_uninit_port tears down whatever was allocated so far
	 * and the port is left inactive.
	 */
	for (i = 0; i < NV_QUEUE_SLOTS; i++) {

		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));

		if (rc != DDI_SUCCESS) {
			nv_uninit_port(nvp);

			return;
		}

		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
		    &(nvp->nvp_sg_acc_hdl[i]));

		if (rc != DDI_SUCCESS) {
			nv_uninit_port(nvp);

			return;
		}

		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
		    nvp->nvp_sg_addr[i], buf_size,
		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP, NULL, &cookie, &count);

		if (rc != DDI_DMA_MAPPED) {
			nv_uninit_port(nvp);

			return;
		}

		/* PRD table must be one contiguous, aligned, 32-bit cookie */
		ASSERT(count == 1);
		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);

		ASSERT(cookie.dmac_laddress <= UINT32_MAX);

		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
	}

	/*
	 * nvp_queue_depth represents the actual drive queue depth, not the
	 * number of slots allocated in the structures (which may be more).
	 * Actual queue depth is only learned after the first NCQ command, so
	 * initialize it to 1 for now.
	 */
	nvp->nvp_queue_depth = 1;

	/*
	 * Port is initialized whether the device is attached or not.
	 * Link processing and device identification will be started later,
	 * after interrupts are initialized.
	 */
	nvp->nvp_type = SATA_DTYPE_NONE;
}
2886 2891
2887 2892
2888 2893 /*
2889 2894 * Free dynamically allocated structures for port.
2890 2895 */
2891 2896 static void
2892 2897 nv_uninit_port(nv_port_t *nvp)
2893 2898 {
2894 2899 int i;
2895 2900
2896 2901 NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2897 2902 "nv_uninit_port uninitializing", NULL);
2898 2903
2899 2904 #ifdef SGPIO_SUPPORT
2900 2905 if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
2901 2906 nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2902 2907 nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2903 2908 }
2904 2909 #endif
2905 2910
2906 2911 nvp->nvp_type = SATA_DTYPE_NONE;
2907 2912
2908 2913 for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2909 2914 if (nvp->nvp_sg_paddr[i]) {
2910 2915 (void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2911 2916 }
2912 2917
2913 2918 if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2914 2919 ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2915 2920 }
2916 2921
2917 2922 if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2918 2923 ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2919 2924 }
2920 2925 }
2921 2926
2922 2927 kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2923 2928 nvp->nvp_slot = NULL;
2924 2929
2925 2930 kmem_free(nvp->nvp_sg_dma_hdl,
2926 2931 sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2927 2932 nvp->nvp_sg_dma_hdl = NULL;
2928 2933
2929 2934 kmem_free(nvp->nvp_sg_acc_hdl,
2930 2935 sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2931 2936 nvp->nvp_sg_acc_hdl = NULL;
2932 2937
2933 2938 kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2934 2939 nvp->nvp_sg_addr = NULL;
2935 2940
2936 2941 kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2937 2942 nvp->nvp_sg_paddr = NULL;
2938 2943 }
2939 2944
2940 2945
2941 2946 /*
2942 2947 * Cache register offsets and access handles to frequently accessed registers
2943 2948 * which are common to either chipset.
2944 2949 */
2945 2950 static void
2946 2951 nv_common_reg_init(nv_ctl_t *nvc)
2947 2952 {
2948 2953 uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2949 2954 uchar_t *bm_addr_offset, *sreg_offset;
2950 2955 uint8_t bar, port;
2951 2956 nv_port_t *nvp;
2952 2957
2953 2958 for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2954 2959 if (port == 0) {
2955 2960 bar = NV_BAR_0;
2956 2961 bm_addr_offset = 0;
2957 2962 sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2958 2963 } else {
2959 2964 bar = NV_BAR_2;
2960 2965 bm_addr_offset = (uchar_t *)8;
2961 2966 sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2962 2967 }
2963 2968
2964 2969 nvp = &(nvc->nvc_port[port]);
2965 2970 nvp->nvp_ctlp = nvc;
2966 2971 nvp->nvp_port_num = port;
2967 2972 NVLOG(NVDBG_INIT, nvc, nvp, "setting up port mappings", NULL);
2968 2973
2969 2974 nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2970 2975 nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2971 2976 nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2972 2977 nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2973 2978 nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2974 2979 nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2975 2980 (long)bm_addr_offset;
2976 2981
2977 2982 nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2978 2983 nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2979 2984 nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2980 2985 nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2981 2986 }
2982 2987 }
2983 2988
2984 2989
2985 2990 static void
2986 2991 nv_uninit_ctl(nv_ctl_t *nvc)
2987 2992 {
2988 2993 int port;
2989 2994 nv_port_t *nvp;
2990 2995
2991 2996 NVLOG(NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered", NULL);
2992 2997
2993 2998 for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2994 2999 nvp = &(nvc->nvc_port[port]);
2995 3000 mutex_enter(&nvp->nvp_mutex);
2996 3001 NVLOG(NVDBG_INIT, nvc, nvp, "uninitializing port", NULL);
2997 3002 nv_uninit_port(nvp);
2998 3003 mutex_exit(&nvp->nvp_mutex);
2999 3004 mutex_destroy(&nvp->nvp_mutex);
3000 3005 cv_destroy(&nvp->nvp_sync_cv);
3001 3006 cv_destroy(&nvp->nvp_reset_cv);
3002 3007 }
3003 3008
3004 3009 kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
3005 3010 nvc->nvc_port = NULL;
3006 3011 }
3007 3012
3008 3013
3009 3014 /*
3010 3015 * ck804 interrupt. This is a wrapper around ck804_intr_process so
3011 3016 * that interrupts from other devices can be disregarded while dtracing.
3012 3017 */
3013 3018 /* ARGSUSED */
3014 3019 static uint_t
3015 3020 ck804_intr(caddr_t arg1, caddr_t arg2)
3016 3021 {
3017 3022 nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3018 3023 uint8_t intr_status;
3019 3024 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3020 3025
3021 3026 if (nvc->nvc_state & NV_CTRL_SUSPEND)
3022 3027 return (DDI_INTR_UNCLAIMED);
3023 3028
3024 3029 intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3025 3030
3026 3031 if (intr_status == 0) {
3027 3032
3028 3033 return (DDI_INTR_UNCLAIMED);
3029 3034 }
3030 3035
3031 3036 ck804_intr_process(nvc, intr_status);
3032 3037
3033 3038 return (DDI_INTR_CLAIMED);
3034 3039 }
3035 3040
3036 3041
3037 3042 /*
3038 3043 * Main interrupt handler for ck804. handles normal device
3039 3044 * interrupts and hot plug and remove interrupts.
3040 3045 *
3041 3046 */
static void
ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
{

	int port, i;
	nv_port_t *nvp;
	nv_slot_t *nv_slotp;
	uchar_t status;
	sata_pkt_t *spkt;
	uint8_t bmstatus, clear_bits;
	ddi_acc_handle_t bmhdl;
	int nvcleared = 0;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uint32_t sstatus;
	/* per-port bit masks within intr_status for hotplug and PM events */
	int port_mask_hot[] = {
		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
	};
	int port_mask_pm[] = {
		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
	};

	NVLOG(NVDBG_INTR, nvc, NULL,
	    "ck804_intr_process entered intr_status=%x", intr_status);

	/*
	 * For command completion interrupt, explicit clear is not required.
	 * however, for the error cases explicit clear is performed.
	 */
	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {

		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};

		/* no device interrupt pending for this port */
		if ((port_mask[port] & intr_status) == 0) {

			continue;
		}

		NVLOG(NVDBG_INTR, nvc, NULL,
		    "ck804_intr_process interrupt on port %d", port);

		nvp = &(nvc->nvc_port[port]);

		mutex_enter(&nvp->nvp_mutex);

		/*
		 * this case might be encountered when the other port
		 * is active
		 */
		if (nvp->nvp_state & NV_DEACTIVATED) {

			/*
			 * clear interrupt bits
			 */
			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
			    port_mask[port]);

			mutex_exit(&nvp->nvp_mutex);

			continue;
		}


		/*
		 * With NCQ disabled only slot 0 is ever in use; no packet
		 * there means this interrupt is spurious.
		 */
		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL) {
			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
			NVLOG(NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
			    " no command in progress status=%x", status);
			mutex_exit(&nvp->nvp_mutex);

			/*
			 * clear interrupt bits
			 */
			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
			    port_mask[port]);

			continue;
		}

		bmhdl = nvp->nvp_bm_hdl;
		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);

		/* the bus master hasn't flagged an interrupt; not ours */
		if (!(bmstatus & BMISX_IDEINTS)) {
			mutex_exit(&nvp->nvp_mutex);

			continue;
		}

		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);

		/* device still busy; the real completion will come later */
		if (status & SATA_STATUS_BSY) {
			mutex_exit(&nvp->nvp_mutex);

			continue;
		}

		nv_slotp = &(nvp->nvp_slot[0]);

		ASSERT(nv_slotp);

		spkt = nv_slotp->nvslot_spkt;

		/* packet may have been aborted/timed out before we got here */
		if (spkt == NULL) {
			mutex_exit(&nvp->nvp_mutex);

			continue;
		}

		/* run the command-specific completion handler for the slot */
		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);

		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);

		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {

			nv_complete_io(nvp, spkt, 0);
		}

		mutex_exit(&nvp->nvp_mutex);
	}

	/*
	 * ck804 often doesn't correctly distinguish hot add/remove
	 * interrupts. Frequently both the ADD and the REMOVE bits
	 * are asserted, whether it was a remove or add. Use sstatus
	 * to distinguish hot add from hot remove.
	 */

	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
		clear_bits = 0;

		nvp = &(nvc->nvc_port[port]);
		mutex_enter(&nvp->nvp_mutex);

		/* power management event: nothing to do beyond clearing it */
		if ((port_mask_pm[port] & intr_status) != 0) {
			clear_bits = port_mask_pm[port];
			NVLOG(NVDBG_HOT, nvc, nvp,
			    "clearing PM interrupt bit: %x",
			    intr_status & port_mask_pm[port]);
		}

		if ((port_mask_hot[port] & intr_status) == 0) {
			if (clear_bits != 0) {
				goto clear;
			} else {
				mutex_exit(&nvp->nvp_mutex);
				continue;
			}
		}

		/*
		 * reaching here means there was a hot add or remove.
		 */
		clear_bits |= port_mask_hot[port];

		ASSERT(nvc->nvc_port[port].nvp_sstatus);

		sstatus = nv_get32(bar5_hdl,
		    nvc->nvc_port[port].nvp_sstatus);

		/*
		 * NOTE(review): device-present-with-PHY-communication maps
		 * to NV_REM_DEV here and everything else to NV_ADD_DEV;
		 * verify the event sense against nv_link_event()'s contract.
		 */
		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
		    SSTATUS_DET_DEVPRE_PHYCOM) {
			nv_link_event(nvp, NV_REM_DEV);
		} else {
			nv_link_event(nvp, NV_ADD_DEV);
		}
	clear:
		/*
		 * clear interrupt bits. explicit interrupt clear is
		 * required for hotplug interrupts.
		 */
		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);

		/*
		 * make sure it's flushed and cleared. If not try
		 * again. Sometimes it has been observed to not clear
		 * on the first try.
		 */
		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);

		/*
		 * make 10 additional attempts to clear the interrupt
		 */
		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
			NVLOG(NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
			    "still not clear try=%d", intr_status,
			    ++nvcleared);
			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
			    clear_bits);
			intr_status = nv_get8(bar5_hdl,
			    nvc->nvc_ck804_int_status);
		}

		/*
		 * if still not clear, log a message and disable the
		 * port. highly unlikely that this path is taken, but it
		 * gives protection against a wedged interrupt.
		 */
		if (intr_status & clear_bits) {
			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
			nvp->nvp_state |= NV_FAILED;
			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
			    B_TRUE);
			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
			    "interrupt. disabling port intr_status=%X",
			    intr_status);
		}

		mutex_exit(&nvp->nvp_mutex);
	}
}
3252 3257
3253 3258
3254 3259 /*
3255 3260 * Interrupt handler for mcp5x. It is invoked by the wrapper for each port
3256 3261 * on the controller, to handle completion and hot plug and remove events.
3257 3262 */
static uint_t
mcp5x_intr_port(nv_port_t *nvp)
{
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uint8_t clear = 0, intr_cycles = 0;
	int ret = DDI_INTR_UNCLAIMED;
	uint16_t int_status;
	clock_t intr_time;
	int loop_cnt = 0;

	/* record entry time for the intr_duration statistic updated below */
	nvp->intr_start_time = ddi_get_lbolt();

	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered", NULL);

	do {
		/*
		 * read current interrupt status
		 */
		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);

		/*
		 * if the port is deactivated, just clear the interrupt and
		 * return. can get here even if interrupts were disabled
		 * on this port but enabled on the other.
		 */
		if (nvp->nvp_state & NV_DEACTIVATED) {
			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
			    int_status);

			return (DDI_INTR_CLAIMED);
		}

		NVLOG(NVDBG_INTR, nvc, nvp, "int_status = %x", int_status);

		DTRACE_PROBE1(int_status_before_h, int, int_status);

		/*
		 * MCP5X_INT_IGNORE interrupts will show up in the status,
		 * but are masked out from causing an interrupt to be generated
		 * to the processor. Ignore them here by masking them out.
		 */
		int_status &= ~(MCP5X_INT_IGNORE);

		DTRACE_PROBE1(int_status_after_h, int, int_status);

		/*
		 * exit the loop when no more interrupts to process
		 */
		if (int_status == 0) {

			break;
		}

		if (int_status & MCP5X_INT_COMPLETE) {
			NVLOG(NVDBG_INTR, nvc, nvp,
			    "mcp5x_packet_complete_intr", NULL);
			/*
			 * since int_status was set, return DDI_INTR_CLAIMED
			 * from the DDI's perspective even though the packet
			 * completion may not have succeeded. If it fails,
			 * need to manually clear the interrupt, otherwise
			 * clearing is implicit as a result of reading the
			 * task file status register.
			 */
			ret = DDI_INTR_CLAIMED;
			if (mcp5x_packet_complete_intr(nvc, nvp) ==
			    NV_FAILURE) {
				clear |= MCP5X_INT_COMPLETE;
			} else {
				/* forward progress: reset stuck-intr guard */
				intr_cycles = 0;
			}
		}

		if (int_status & MCP5X_INT_DMA_SETUP) {
			NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr",
			    NULL);

			/*
			 * Needs to be cleared before starting the BM, so do it
			 * now. make sure this is still working.
			 */
			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
			    MCP5X_INT_DMA_SETUP);
#ifdef NCQ
			ret = mcp5x_dma_setup_intr(nvc, nvp);
#endif
		}

		if (int_status & MCP5X_INT_REM) {
			/* device removed: deliver link event under the mutex */
			clear |= MCP5X_INT_REM;
			ret = DDI_INTR_CLAIMED;

			mutex_enter(&nvp->nvp_mutex);
			nv_link_event(nvp, NV_REM_DEV);
			mutex_exit(&nvp->nvp_mutex);

		} else if (int_status & MCP5X_INT_ADD) {
			/* device added: deliver link event under the mutex */
			clear |= MCP5X_INT_ADD;
			ret = DDI_INTR_CLAIMED;

			mutex_enter(&nvp->nvp_mutex);
			nv_link_event(nvp, NV_ADD_DEV);
			mutex_exit(&nvp->nvp_mutex);
		}
		/* write back any bits that require an explicit clear */
		if (clear) {
			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
			clear = 0;
		}

		/*
		 * protect against a stuck interrupt
		 */
		if (intr_cycles++ == NV_MAX_INTR_LOOP) {

			NVLOG(NVDBG_INTR, nvc, nvp, "excessive interrupt "
			    "processing. Disabling interrupts int_status=%X"
			    " clear=%X", int_status, clear);
			DTRACE_PROBE(excessive_interrupts_f);

			mutex_enter(&nvp->nvp_mutex);
			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
			/*
			 * reset the device. If it remains inaccessible
			 * after a reset it will be failed then.
			 */
			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
			    B_TRUE);
			mutex_exit(&nvp->nvp_mutex);
		}

	} while (loop_cnt++ < nv_max_intr_loops);

	/* track the worst-case loop iteration count for debugging */
	if (loop_cnt > nvp->intr_loop_cnt) {
		NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp,
		    "Exiting with multiple intr loop count %d", loop_cnt);
		nvp->intr_loop_cnt = loop_cnt;
	}

	/* verbose-debug snapshot of the port state at exit */
	if ((nv_debug_flags & (NVDBG_INTR | NVDBG_VERBOSE)) ==
	    (NVDBG_INTR | NVDBG_VERBOSE)) {
		uint8_t status, bmstatus;
		uint16_t int_status2;

		if (int_status & MCP5X_INT_COMPLETE) {
			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
			bmstatus = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmisx);
			int_status2 = nv_get16(nvp->nvp_ctlp->nvc_bar_hdl[5],
			    nvp->nvp_mcp5x_int_status);
			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
			    "mcp55_intr_port: Exiting with altstatus %x, "
			    "bmicx %x, int_status2 %X, int_status %X, ret %x,"
			    " loop_cnt %d ", status, bmstatus, int_status2,
			    int_status, ret, loop_cnt);
		}
	}

	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret);

	/*
	 * To facilitate debugging, keep track of the length of time spent in
	 * the port interrupt routine.
	 */
	intr_time = ddi_get_lbolt() - nvp->intr_start_time;
	if (intr_time > nvp->intr_duration)
		nvp->intr_duration = intr_time;

	return (ret);
}
3427 3432
3428 3433
3429 3434 /* ARGSUSED */
3430 3435 static uint_t
3431 3436 mcp5x_intr(caddr_t arg1, caddr_t arg2)
3432 3437 {
3433 3438 nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3434 3439 int ret;
3435 3440
3436 3441 if (nvc->nvc_state & NV_CTRL_SUSPEND)
3437 3442 return (DDI_INTR_UNCLAIMED);
3438 3443
3439 3444 ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3440 3445 ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3441 3446
3442 3447 return (ret);
3443 3448 }
3444 3449
3445 3450
3446 3451 #ifdef NCQ
3447 3452 /*
3448 3453 * with software driven NCQ on mcp5x, an interrupt occurs right
3449 3454 * before the drive is ready to do a DMA transfer. At this point,
3450 3455 * the PRD table needs to be programmed and the DMA engine enabled
3451 3456 * and ready to go.
3452 3457 *
3453 3458 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3454 3459 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3455 3460 * -- clear bit 0 of master command reg
3456 3461 * -- program PRD
3457 3462 * -- clear the interrupt status bit for the DMA Setup FIS
3458 3463 * -- set bit 0 of the bus master command register
3459 3464 */
3460 3465 static int
3461 3466 mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3462 3467 {
3463 3468 int slot;
3464 3469 ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3465 3470 uint8_t bmicx;
3466 3471 int port = nvp->nvp_port_num;
3467 3472 uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3468 3473 MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3469 3474
3470 3475 nv_cmn_err(CE_PANIC, nvc, nvp,
3471 3476 "this is should not be executed at all until NCQ");
3472 3477
3473 3478 mutex_enter(&nvp->nvp_mutex);
3474 3479
3475 3480 slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);
3476 3481
3477 3482 slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3478 3483
3479 3484 NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
3480 3485 " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache);
3481 3486
3482 3487 /*
3483 3488 * halt the DMA engine. This step is necessary according to
3484 3489 * the mcp5x spec, probably since there may have been a "first" packet
3485 3490 * that already programmed the DMA engine, but may not turn out to
3486 3491 * be the first one processed.
3487 3492 */
3488 3493 bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3489 3494
3490 3495 if (bmicx & BMICX_SSBM) {
3491 3496 NVLOG(NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3492 3497 "another packet. Cancelling and reprogramming", NULL);
3493 3498 nv_put8(bmhdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);
3494 3499 }
3495 3500 nv_put8(bmhdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);
3496 3501
3497 3502 nv_start_dma_engine(nvp, slot);
3498 3503
3499 3504 mutex_exit(&nvp->nvp_mutex);
3500 3505
3501 3506 return (DDI_INTR_CLAIMED);
3502 3507 }
3503 3508 #endif /* NCQ */
3504 3509
3505 3510
3506 3511 /*
3507 3512 * packet completion interrupt. If the packet is complete, invoke
3508 3513 * the packet completion callback.
3509 3514 */
static int
mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
{
	uint8_t status, bmstatus;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	int sactive;
	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
	sata_pkt_t *spkt;
	nv_slot_t *nv_slotp;

	mutex_enter(&nvp->nvp_mutex);

	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);

	/*
	 * Neither an interrupt nor an error is pending in the bus master
	 * status, so there is nothing for this port to complete.
	 */
	if (!(bmstatus & (BMISX_IDEINTS | BMISX_IDERR))) {
		DTRACE_PROBE1(bmstatus_h, int, bmstatus);
		NVLOG(NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set %x",
		    bmstatus);
		mutex_exit(&nvp->nvp_mutex);

		return (NV_FAILURE);
	}

	/*
	 * Commands may have been processed by abort or timeout before
	 * interrupt processing acquired the mutex. So we may be processing
	 * an interrupt for packets that were already removed.
	 * For functioning NCQ processing all slots may be checked, but
	 * with NCQ disabled (current code), relying on *_run flags is OK.
	 */
	if (nvp->nvp_non_ncq_run) {
		/*
		 * If the just completed item is a non-ncq command, the busy
		 * bit should not be set
		 */
		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
		if (status & SATA_STATUS_BSY) {
			nv_cmn_err(CE_WARN, nvc, nvp,
			    "unexpected SATA_STATUS_BSY set");
			DTRACE_PROBE(unexpected_status_bsy_p);
			mutex_exit(&nvp->nvp_mutex);
			/*
			 * calling function will clear interrupt. then
			 * the real interrupt will either arrive or the
			 * packet timeout handling will take over and
			 * reset.
			 */
			return (NV_FAILURE);
		}
		ASSERT(nvp->nvp_ncq_run == 0);
	} else {
		ASSERT(nvp->nvp_non_ncq_run == 0);
		/*
		 * Pre-NCQ code!
		 * Nothing to do. The packet for the command that just
		 * completed is already gone. Just clear the interrupt.
		 */
		(void) nv_bm_status_clear(nvp);
		(void) nv_get8(nvp->nvp_cmd_hdl, nvp->nvp_status);
		mutex_exit(&nvp->nvp_mutex);
		return (NV_SUCCESS);

		/*
		 * NCQ check for BSY here and wait if still bsy before
		 * continuing. Rather than wait for it to be cleared
		 * when starting a packet and wasting CPU time, the starting
		 * thread can exit immediate, but might have to spin here
		 * for a bit possibly. Needs more work and experimentation.
		 *
		 */
	}

	/*
	 * active_pkt_bit will represent the bitmap of the single completed
	 * packet. Because of the nature of sw assisted NCQ, only one
	 * command will complete per interrupt.
	 */

	/*
	 * NOTE: ncq_command is initialized to B_FALSE above and never set
	 * in this function, so only the non-NCQ branch can execute until
	 * NCQ support is completed.
	 */
	if (ncq_command == B_FALSE) {
		active_pkt = 0;
	} else {
		/*
		 * NCQ: determine which command just completed, by examining
		 * which bit cleared in the register since last written.
		 */
		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);

		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;

		ASSERT(active_pkt_bit);


		/*
		 * this failure path needs more work to handle the
		 * error condition and recovery.
		 */
		if (active_pkt_bit == 0) {
			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;

			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X "
			    "nvp->nvp_sactive %X", sactive,
			    nvp->nvp_sactive_cache);

			(void) nv_get8(cmdhdl, nvp->nvp_status);

			mutex_exit(&nvp->nvp_mutex);

			return (NV_FAILURE);
		}

		/* locate the index of the single cleared bit */
		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
		    active_pkt++, active_pkt_bit >>= 1) {
		}

		/*
		 * make sure only one bit is ever turned on
		 */
		ASSERT(active_pkt_bit == 1);

		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
	}

	nv_slotp = &(nvp->nvp_slot[active_pkt]);

	spkt = nv_slotp->nvslot_spkt;

	ASSERT(spkt != NULL);

	/* run the command-specific completion handler for this slot */
	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);

	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);

	if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {

		nv_complete_io(nvp, spkt, active_pkt);
	}

	mutex_exit(&nvp->nvp_mutex);

	return (NV_SUCCESS);
}
3651 3656
3652 3657
3653 3658 static void
3654 3659 nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3655 3660 {
3656 3661
3657 3662 ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3658 3663
3659 3664 if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3660 3665 nvp->nvp_ncq_run--;
3661 3666 } else {
3662 3667 nvp->nvp_non_ncq_run--;
3663 3668 }
3664 3669
3665 3670 /*
3666 3671 * mark the packet slot idle so it can be reused. Do this before
3667 3672 * calling satapkt_comp so the slot can be reused.
3668 3673 */
3669 3674 (&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3670 3675
3671 3676 if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3672 3677 /*
3673 3678 * If this is not timed polled mode cmd, which has an
3674 3679 * active thread monitoring for completion, then need
3675 3680 * to signal the sleeping thread that the cmd is complete.
3676 3681 */
3677 3682 if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3678 3683 cv_signal(&nvp->nvp_sync_cv);
3679 3684 }
3680 3685
3681 3686 return;
3682 3687 }
3683 3688
3684 3689 if (spkt->satapkt_comp != NULL) {
3685 3690 mutex_exit(&nvp->nvp_mutex);
3686 3691 (*spkt->satapkt_comp)(spkt);
3687 3692 mutex_enter(&nvp->nvp_mutex);
3688 3693 }
3689 3694 }
3690 3695
3691 3696
3692 3697 /*
3693 3698 * check whether packet is ncq command or not. for ncq command,
3694 3699 * start it if there is still room on queue. for non-ncq command only
3695 3700 * start if no other command is running.
3696 3701 */
3697 3702 static int
3698 3703 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3699 3704 {
3700 3705 uint8_t cmd, ncq;
3701 3706
3702 3707 NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry", NULL);
3703 3708
3704 3709 cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3705 3710
3706 3711 ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3707 3712 (cmd == SATAC_READ_FPDMA_QUEUED));
3708 3713
3709 3714 if (ncq == B_FALSE) {
3710 3715
3711 3716 if ((nvp->nvp_non_ncq_run == 1) ||
3712 3717 (nvp->nvp_ncq_run > 0)) {
3713 3718 /*
3714 3719 * next command is non-ncq which can't run
3715 3720 * concurrently. exit and return queue full.
3716 3721 */
3717 3722 spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3718 3723
3719 3724 return (SATA_TRAN_QUEUE_FULL);
3720 3725 }
3721 3726
3722 3727 return (nv_start_common(nvp, spkt));
3723 3728 }
3724 3729
3725 3730 /*
3726 3731 * ncq == B_TRUE
3727 3732 */
3728 3733 if (nvp->nvp_non_ncq_run == 1) {
3729 3734 /*
3730 3735 * cannot start any NCQ commands when there
3731 3736 * is a non-NCQ command running.
3732 3737 */
3733 3738 spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3734 3739
3735 3740 return (SATA_TRAN_QUEUE_FULL);
3736 3741 }
3737 3742
3738 3743 #ifdef NCQ
3739 3744 /*
3740 3745 * this is not compiled for now as satapkt_device.satadev_qdepth
3741 3746 * is being pulled out until NCQ support is later addressed
3742 3747 *
3743 3748 * nvp_queue_depth is initialized by the first NCQ command
3744 3749 * received.
3745 3750 */
3746 3751 if (nvp->nvp_queue_depth == 1) {
3747 3752 nvp->nvp_queue_depth =
3748 3753 spkt->satapkt_device.satadev_qdepth;
3749 3754
3750 3755 ASSERT(nvp->nvp_queue_depth > 1);
3751 3756
3752 3757 NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3753 3758 "nv_process_queue: nvp_queue_depth set to %d",
3754 3759 nvp->nvp_queue_depth);
3755 3760 }
3756 3761 #endif
3757 3762
3758 3763 if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3759 3764 /*
3760 3765 * max number of NCQ commands already active
3761 3766 */
3762 3767 spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3763 3768
3764 3769 return (SATA_TRAN_QUEUE_FULL);
3765 3770 }
3766 3771
3767 3772 return (nv_start_common(nvp, spkt));
3768 3773 }
3769 3774
3770 3775
3771 3776 /*
3772 3777 * configure INTx and legacy interrupts
3773 3778 */
3774 3779 static int
3775 3780 nv_add_legacy_intrs(nv_ctl_t *nvc)
3776 3781 {
3777 3782 dev_info_t *devinfo = nvc->nvc_dip;
3778 3783 int actual, count = 0;
3779 3784 int x, y, rc, inum = 0;
3780 3785
3781 3786 NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_legacy_intrs", NULL);
3782 3787
3783 3788 /*
3784 3789 * get number of interrupts
3785 3790 */
3786 3791 rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3787 3792 if ((rc != DDI_SUCCESS) || (count == 0)) {
3788 3793 NVLOG(NVDBG_INIT, nvc, NULL,
3789 3794 "ddi_intr_get_nintrs() failed, "
3790 3795 "rc %d count %d", rc, count);
3791 3796
3792 3797 return (DDI_FAILURE);
3793 3798 }
3794 3799
3795 3800 /*
3796 3801 * allocate an array of interrupt handles
3797 3802 */
3798 3803 nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3799 3804 nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3800 3805
3801 3806 /*
3802 3807 * call ddi_intr_alloc()
3803 3808 */
3804 3809 rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3805 3810 inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3806 3811
3807 3812 if ((rc != DDI_SUCCESS) || (actual == 0)) {
3808 3813 nv_cmn_err(CE_WARN, nvc, NULL,
3809 3814 "ddi_intr_alloc() failed, rc %d", rc);
3810 3815 kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3811 3816
3812 3817 return (DDI_FAILURE);
3813 3818 }
3814 3819
3815 3820 if (actual < count) {
3816 3821 nv_cmn_err(CE_WARN, nvc, NULL,
3817 3822 "ddi_intr_alloc: requested: %d, received: %d",
3818 3823 count, actual);
3819 3824
3820 3825 goto failure;
3821 3826 }
3822 3827
3823 3828 nvc->nvc_intr_cnt = actual;
3824 3829
3825 3830 /*
3826 3831 * get intr priority
3827 3832 */
3828 3833 if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3829 3834 DDI_SUCCESS) {
3830 3835 nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3831 3836
3832 3837 goto failure;
3833 3838 }
3834 3839
3835 3840 /*
3836 3841 * Test for high level mutex
3837 3842 */
3838 3843 if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3839 3844 nv_cmn_err(CE_WARN, nvc, NULL,
3840 3845 "nv_add_legacy_intrs: high level intr not supported");
3841 3846
3842 3847 goto failure;
3843 3848 }
3844 3849
3845 3850 for (x = 0; x < actual; x++) {
3846 3851 if (ddi_intr_add_handler(nvc->nvc_htable[x],
3847 3852 nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3848 3853 nv_cmn_err(CE_WARN, nvc, NULL,
3849 3854 "ddi_intr_add_handler() failed");
3850 3855
3851 3856 goto failure;
3852 3857 }
3853 3858 }
3854 3859
3855 3860 /*
3856 3861 * call ddi_intr_enable() for legacy interrupts
3857 3862 */
3858 3863 for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3859 3864 (void) ddi_intr_enable(nvc->nvc_htable[x]);
3860 3865 }
3861 3866
3862 3867 return (DDI_SUCCESS);
3863 3868
3864 3869 failure:
3865 3870 /*
3866 3871 * free allocated intr and nvc_htable
3867 3872 */
3868 3873 for (y = 0; y < actual; y++) {
3869 3874 (void) ddi_intr_free(nvc->nvc_htable[y]);
3870 3875 }
3871 3876
3872 3877 kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3873 3878
3874 3879 return (DDI_FAILURE);
3875 3880 }
3876 3881
3877 3882 #ifdef NV_MSI_SUPPORTED
3878 3883 /*
3879 3884 * configure MSI interrupts
3880 3885 */
3881 3886 static int
3882 3887 nv_add_msi_intrs(nv_ctl_t *nvc)
3883 3888 {
3884 3889 dev_info_t *devinfo = nvc->nvc_dip;
3885 3890 int count, avail, actual;
3886 3891 int x, y, rc, inum = 0;
3887 3892
3888 3893 NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_msi_intrs", NULL);
3889 3894
3890 3895 /*
3891 3896 * get number of interrupts
3892 3897 */
3893 3898 rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3894 3899 if ((rc != DDI_SUCCESS) || (count == 0)) {
3895 3900 nv_cmn_err(CE_WARN, nvc, NULL,
3896 3901 "ddi_intr_get_nintrs() failed, "
3897 3902 "rc %d count %d", rc, count);
3898 3903
3899 3904 return (DDI_FAILURE);
3900 3905 }
3901 3906
3902 3907 /*
3903 3908 * get number of available interrupts
3904 3909 */
3905 3910 rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3906 3911 if ((rc != DDI_SUCCESS) || (avail == 0)) {
3907 3912 nv_cmn_err(CE_WARN, nvc, NULL,
3908 3913 "ddi_intr_get_navail() failed, "
3909 3914 "rc %d avail %d", rc, avail);
3910 3915
3911 3916 return (DDI_FAILURE);
3912 3917 }
3913 3918
3914 3919 if (avail < count) {
3915 3920 nv_cmn_err(CE_WARN, nvc, NULL,
3916 3921 "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
3917 3922 avail, count);
3918 3923 }
3919 3924
3920 3925 /*
3921 3926 * allocate an array of interrupt handles
3922 3927 */
3923 3928 nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3924 3929 nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3925 3930
3926 3931 rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3927 3932 inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3928 3933
3929 3934 if ((rc != DDI_SUCCESS) || (actual == 0)) {
3930 3935 nv_cmn_err(CE_WARN, nvc, NULL,
3931 3936 "ddi_intr_alloc() failed, rc %d", rc);
3932 3937 kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3933 3938
3934 3939 return (DDI_FAILURE);
3935 3940 }
3936 3941
3937 3942 /*
3938 3943 * Use interrupt count returned or abort?
3939 3944 */
3940 3945 if (actual < count) {
3941 3946 NVLOG(NVDBG_INIT, nvc, NULL,
3942 3947 "Requested: %d, Received: %d", count, actual);
3943 3948 }
3944 3949
3945 3950 nvc->nvc_intr_cnt = actual;
3946 3951
3947 3952 /*
3948 3953 * get priority for first msi, assume remaining are all the same
3949 3954 */
3950 3955 if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3951 3956 DDI_SUCCESS) {
3952 3957 nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3953 3958
3954 3959 goto failure;
3955 3960 }
3956 3961
3957 3962 /*
3958 3963 * test for high level mutex
3959 3964 */
3960 3965 if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3961 3966 nv_cmn_err(CE_WARN, nvc, NULL,
3962 3967 "nv_add_msi_intrs: high level intr not supported");
3963 3968
3964 3969 goto failure;
3965 3970 }
3966 3971
3967 3972 /*
3968 3973 * Call ddi_intr_add_handler()
3969 3974 */
3970 3975 for (x = 0; x < actual; x++) {
3971 3976 if (ddi_intr_add_handler(nvc->nvc_htable[x],
3972 3977 nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3973 3978 nv_cmn_err(CE_WARN, nvc, NULL,
3974 3979 "ddi_intr_add_handler() failed");
3975 3980
3976 3981 goto failure;
3977 3982 }
3978 3983 }
3979 3984
3980 3985 (void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3981 3986
3982 3987 if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3983 3988 (void) ddi_intr_block_enable(nvc->nvc_htable,
3984 3989 nvc->nvc_intr_cnt);
3985 3990 } else {
3986 3991 /*
3987 3992 * Call ddi_intr_enable() for MSI non block enable
3988 3993 */
3989 3994 for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3990 3995 (void) ddi_intr_enable(nvc->nvc_htable[x]);
3991 3996 }
3992 3997 }
3993 3998
3994 3999 return (DDI_SUCCESS);
3995 4000
3996 4001 failure:
3997 4002 /*
3998 4003 * free allocated intr and nvc_htable
3999 4004 */
4000 4005 for (y = 0; y < actual; y++) {
4001 4006 (void) ddi_intr_free(nvc->nvc_htable[y]);
4002 4007 }
4003 4008
4004 4009 kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
4005 4010
4006 4011 return (DDI_FAILURE);
4007 4012 }
4008 4013 #endif
4009 4014
4010 4015
4011 4016 static void
4012 4017 nv_rem_intrs(nv_ctl_t *nvc)
4013 4018 {
4014 4019 int x, i;
4015 4020 nv_port_t *nvp;
4016 4021
4017 4022 NVLOG(NVDBG_INIT, nvc, NULL, "nv_rem_intrs", NULL);
4018 4023
4019 4024 /*
4020 4025 * prevent controller from generating interrupts by
4021 4026 * masking them out. This is an extra precaution.
4022 4027 */
4023 4028 for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
4024 4029 nvp = (&nvc->nvc_port[i]);
4025 4030 mutex_enter(&nvp->nvp_mutex);
4026 4031 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
4027 4032 mutex_exit(&nvp->nvp_mutex);
4028 4033 }
4029 4034
4030 4035 /*
4031 4036 * disable all interrupts
4032 4037 */
4033 4038 if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
4034 4039 (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
4035 4040 (void) ddi_intr_block_disable(nvc->nvc_htable,
4036 4041 nvc->nvc_intr_cnt);
4037 4042 } else {
4038 4043 for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4039 4044 (void) ddi_intr_disable(nvc->nvc_htable[x]);
4040 4045 }
4041 4046 }
4042 4047
4043 4048 for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4044 4049 (void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
4045 4050 (void) ddi_intr_free(nvc->nvc_htable[x]);
4046 4051 }
4047 4052
4048 4053 kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
4049 4054 }
4050 4055
4051 4056
4052 4057 /*
4053 4058 * variable argument wrapper for cmn_err. prefixes the instance and port
4054 4059 * number if possible
4055 4060 */
4056 4061 static void
4057 4062 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, va_list ap,
4058 4063 boolean_t log_to_sata_ring)
4059 4064 {
4060 4065 char port[NV_STR_LEN];
4061 4066 char inst[NV_STR_LEN];
4062 4067 dev_info_t *dip;
4063 4068
4064 4069 if (nvc) {
4065 4070 (void) snprintf(inst, NV_STR_LEN, "inst%d ",
4066 4071 ddi_get_instance(nvc->nvc_dip));
4067 4072 dip = nvc->nvc_dip;
4068 4073 } else {
4069 4074 inst[0] = '\0';
4070 4075 }
4071 4076
4072 4077 if (nvp) {
4073 4078 (void) snprintf(port, NV_STR_LEN, "port%d",
4074 4079 nvp->nvp_port_num);
4075 4080 dip = nvp->nvp_ctlp->nvc_dip;
4076 4081 } else {
4077 4082 port[0] = '\0';
4078 4083 }
4079 4084
4080 4085 mutex_enter(&nv_log_mutex);
4081 4086
4082 4087 (void) sprintf(nv_log_buf, "%s%s%s", inst, port,
4083 4088 (inst[0]|port[0] ? ": " :""));
4084 4089
4085 4090 (void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4086 4091 NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4087 4092
4088 4093 /*
4089 4094 * Log to console or log to file, depending on
4090 4095 * nv_log_to_console setting.
4091 4096 */
4092 4097 if (nv_log_to_console) {
4093 4098 if (nv_prom_print) {
4094 4099 prom_printf("%s\n", nv_log_buf);
4095 4100 } else {
4096 4101 cmn_err(ce, "%s\n", nv_log_buf);
4097 4102 }
4098 4103 } else {
4099 4104 cmn_err(ce, "!%s", nv_log_buf);
4100 4105 }
4101 4106
4102 4107 if (log_to_sata_ring == B_TRUE) {
4103 4108 (void) sprintf(nv_log_buf, "%s%s", port, (port[0] ? ": " :""));
4104 4109
4105 4110 (void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4106 4111 NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4107 4112
4108 4113 sata_trace_debug(dip, nv_log_buf);
4109 4114 }
4110 4115
4111 4116 mutex_exit(&nv_log_mutex);
4112 4117 }
4113 4118
4114 4119
4115 4120 /*
4116 4121 * wrapper for cmn_err
4117 4122 */
4118 4123 static void
4119 4124 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
4120 4125 {
4121 4126 va_list ap;
4122 4127
4123 4128 va_start(ap, fmt);
4124 4129 nv_vcmn_err(ce, nvc, nvp, fmt, ap, B_TRUE);
4125 4130 va_end(ap);
4126 4131 }
4127 4132
4128 4133
/*
 * Debug logging into the sata module's trace ring buffer, optionally
 * mirrored to the console via nv_vcmn_err() when nv_log_to_cmn_err is
 * set.  The port number is prefixed to the format string when nvp is
 * supplied; either or both of nvc/nvp may be NULL.
 */
static void
nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...)
{
	va_list ap;

	/* optionally mirror the message to the console first */
	if (nv_log_to_cmn_err == B_TRUE) {
		va_start(ap, fmt);
		nv_vcmn_err(CE_CONT, nvc, nvp, fmt, ap, B_FALSE);
		va_end(ap);

	}

	/* fresh va_start for the trace-ring formatting below */
	va_start(ap, fmt);

	/* no controller or port context: trace with a NULL dip */
	if (nvp == NULL && nvc == NULL) {
		sata_vtrace_debug(NULL, fmt, ap);
		va_end(ap);

		return;
	}

	/* controller context only: no port prefix */
	if (nvp == NULL && nvc != NULL) {
		sata_vtrace_debug(nvc->nvc_dip, fmt, ap);
		va_end(ap);

		return;
	}

	/*
	 * nvp is not NULL, but nvc might be. Reference nvp for both
	 * port and dip, to get the port number prefixed on the
	 * message.
	 */
	mutex_enter(&nv_log_mutex);

	/* build the prefixed format string in the shared log buffer */
	(void) snprintf(nv_log_buf, NV_LOGBUF_LEN, "port%d: %s",
	    nvp->nvp_port_num, fmt);

	sata_vtrace_debug(nvp->nvp_ctlp->nvc_dip, nv_log_buf, ap);

	mutex_exit(&nv_log_mutex);

	va_end(ap);
}
4173 4178
4174 4179
4175 4180 /*
4176 4181 * program registers which are common to all commands
4177 4182 */
4178 4183 static void
4179 4184 nv_program_taskfile_regs(nv_port_t *nvp, int slot)
4180 4185 {
4181 4186 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4182 4187 sata_pkt_t *spkt;
4183 4188 sata_cmd_t *satacmd;
4184 4189 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4185 4190 uint8_t cmd, ncq = B_FALSE;
4186 4191
4187 4192 spkt = nv_slotp->nvslot_spkt;
4188 4193 satacmd = &spkt->satapkt_cmd;
4189 4194 cmd = satacmd->satacmd_cmd_reg;
4190 4195
4191 4196 ASSERT(nvp->nvp_slot);
4192 4197
4193 4198 if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4194 4199 (cmd == SATAC_READ_FPDMA_QUEUED)) {
4195 4200 ncq = B_TRUE;
4196 4201 }
4197 4202
4198 4203 /*
4199 4204 * select the drive
4200 4205 */
4201 4206 nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4202 4207
4203 4208 /*
4204 4209 * make certain the drive selected
4205 4210 */
4206 4211 if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4207 4212 NV_SEC2USEC(5), 0) == B_FALSE) {
4208 4213
4209 4214 return;
4210 4215 }
4211 4216
4212 4217 switch (spkt->satapkt_cmd.satacmd_addr_type) {
4213 4218
4214 4219 case ATA_ADDR_LBA:
4215 4220 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode",
4216 4221 NULL);
4217 4222
4218 4223 nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4219 4224 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4220 4225 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4221 4226 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4222 4227 nv_put8(cmdhdl, nvp->nvp_feature,
4223 4228 satacmd->satacmd_features_reg);
4224 4229
4225 4230
4226 4231 break;
4227 4232
4228 4233 case ATA_ADDR_LBA28:
4229 4234 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4230 4235 "ATA_ADDR_LBA28 mode", NULL);
4231 4236 /*
4232 4237 * NCQ only uses 48-bit addressing
4233 4238 */
4234 4239 ASSERT(ncq != B_TRUE);
4235 4240
4236 4241 nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4237 4242 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4238 4243 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4239 4244 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4240 4245 nv_put8(cmdhdl, nvp->nvp_feature,
4241 4246 satacmd->satacmd_features_reg);
4242 4247
4243 4248 break;
4244 4249
4245 4250 case ATA_ADDR_LBA48:
4246 4251 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4247 4252 "ATA_ADDR_LBA48 mode", NULL);
4248 4253
4249 4254 /*
4250 4255 * for NCQ, tag goes into count register and real sector count
4251 4256 * into features register. The sata module does the translation
4252 4257 * in the satacmd.
4253 4258 */
4254 4259 if (ncq == B_TRUE) {
4255 4260 nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
4256 4261 } else {
4257 4262 nv_put8(cmdhdl, nvp->nvp_count,
4258 4263 satacmd->satacmd_sec_count_msb);
4259 4264 nv_put8(cmdhdl, nvp->nvp_count,
4260 4265 satacmd->satacmd_sec_count_lsb);
4261 4266 }
4262 4267
4263 4268 nv_put8(cmdhdl, nvp->nvp_feature,
4264 4269 satacmd->satacmd_features_reg_ext);
4265 4270 nv_put8(cmdhdl, nvp->nvp_feature,
4266 4271 satacmd->satacmd_features_reg);
4267 4272
4268 4273 /*
4269 4274 * send the high-order half first
4270 4275 */
4271 4276 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
4272 4277 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
4273 4278 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
4274 4279
4275 4280 /*
4276 4281 * Send the low-order half
4277 4282 */
4278 4283 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4279 4284 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4280 4285 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4281 4286
4282 4287 break;
4283 4288
4284 4289 case 0:
4285 4290 /*
4286 4291 * non-media access commands such as identify and features
4287 4292 * take this path.
4288 4293 */
4289 4294 nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4290 4295 nv_put8(cmdhdl, nvp->nvp_feature,
4291 4296 satacmd->satacmd_features_reg);
4292 4297 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4293 4298 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4294 4299 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4295 4300
4296 4301 break;
4297 4302
4298 4303 default:
4299 4304 break;
4300 4305 }
4301 4306
4302 4307 ASSERT(nvp->nvp_slot);
4303 4308 }
4304 4309
4305 4310
4306 4311 /*
4307 4312 * start a command that involves no media access
4308 4313 */
4309 4314 static int
4310 4315 nv_start_nodata(nv_port_t *nvp, int slot)
4311 4316 {
4312 4317 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4313 4318 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4314 4319 sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4315 4320 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4316 4321
4317 4322 nv_program_taskfile_regs(nvp, slot);
4318 4323
4319 4324 /*
4320 4325 * This next one sets the controller in motion
4321 4326 */
4322 4327 nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
4323 4328
4324 4329 return (SATA_TRAN_ACCEPTED);
4325 4330 }
4326 4331
4327 4332
4328 4333 static int
4329 4334 nv_bm_status_clear(nv_port_t *nvp)
4330 4335 {
4331 4336 ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4332 4337 uchar_t status, ret;
4333 4338
4334 4339 /*
4335 4340 * Get the current BM status
4336 4341 */
4337 4342 ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
4338 4343
4339 4344 status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
4340 4345
4341 4346 /*
4342 4347 * Clear the latches (and preserve the other bits)
4343 4348 */
4344 4349 nv_put8(bmhdl, nvp->nvp_bmisx, status);
4345 4350
4346 4351 return (ret);
4347 4352 }
4348 4353
4349 4354
4350 4355 /*
4351 4356 * program the bus master DMA engine with the PRD address for
4352 4357 * the active slot command, and start the DMA engine.
4353 4358 */
4354 4359 static void
4355 4360 nv_start_dma_engine(nv_port_t *nvp, int slot)
4356 4361 {
4357 4362 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4358 4363 ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4359 4364 uchar_t direction;
4360 4365
4361 4366 ASSERT(nv_slotp->nvslot_spkt != NULL);
4362 4367
4363 4368 if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
4364 4369 == SATA_DIR_READ) {
4365 4370 direction = BMICX_RWCON_WRITE_TO_MEMORY;
4366 4371 } else {
4367 4372 direction = BMICX_RWCON_READ_FROM_MEMORY;
4368 4373 }
4369 4374
4370 4375 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4371 4376 "nv_start_dma_engine entered", NULL);
4372 4377
4373 4378 #if NOT_USED
4374 4379 /*
4375 4380 * NOT NEEDED. Left here of historical reason.
4376 4381 * Reset the controller's interrupt and error status bits.
4377 4382 */
4378 4383 (void) nv_bm_status_clear(nvp);
4379 4384 #endif
4380 4385 /*
4381 4386 * program the PRD table physical start address
4382 4387 */
4383 4388 nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
4384 4389
4385 4390 /*
4386 4391 * set the direction control and start the DMA controller
4387 4392 */
4388 4393 nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
4389 4394 }
4390 4395
4391 4396 /*
4392 4397 * start dma command, either in or out
4393 4398 */
4394 4399 static int
4395 4400 nv_start_dma(nv_port_t *nvp, int slot)
4396 4401 {
4397 4402 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4398 4403 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4399 4404 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4400 4405 sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4401 4406 uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
4402 4407 #ifdef NCQ
4403 4408 uint8_t ncq = B_FALSE;
4404 4409 #endif
4405 4410 ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
4406 4411 uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
4407 4412 int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
4408 4413 ddi_dma_cookie_t *srcp = sata_cmdp->satacmd_dma_cookie_list;
4409 4414
4410 4415 ASSERT(sg_count != 0);
4411 4416
4412 4417 if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
4413 4418 nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
4414 4419 " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4415 4420 sata_cmdp->satacmd_num_dma_cookies);
4416 4421
4417 4422 return (NV_FAILURE);
4418 4423 }
4419 4424
4420 4425 nv_program_taskfile_regs(nvp, slot);
4421 4426
4422 4427 /*
4423 4428 * start the drive in motion
4424 4429 */
4425 4430 nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4426 4431
4427 4432 /*
4428 4433 * the drive starts processing the transaction when the cmd register
4429 4434 * is written. This is done here before programming the DMA engine to
4430 4435 * parallelize and save some time. In the event that the drive is ready
4431 4436 * before DMA, it will wait.
4432 4437 */
4433 4438 #ifdef NCQ
4434 4439 if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4435 4440 (cmd == SATAC_READ_FPDMA_QUEUED)) {
4436 4441 ncq = B_TRUE;
4437 4442 }
4438 4443 #endif
4439 4444
4440 4445 /*
4441 4446 * copy the PRD list to PRD table in DMA accessible memory
4442 4447 * so that the controller can access it.
4443 4448 */
4444 4449 for (idx = 0; idx < sg_count; idx++, srcp++) {
4445 4450 uint32_t size;
4446 4451
4447 4452 nv_put32(sghdl, dstp++, srcp->dmac_address);
4448 4453
4449 4454 /* Set the number of bytes to transfer, 0 implies 64KB */
4450 4455 size = srcp->dmac_size;
4451 4456 if (size == 0x10000)
4452 4457 size = 0;
4453 4458
4454 4459 /*
4455 4460 * If this is a 40-bit address, copy bits 32-40 of the
4456 4461 * physical address to bits 16-24 of the PRD count.
4457 4462 */
4458 4463 if (srcp->dmac_laddress > UINT32_MAX) {
4459 4464 size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4460 4465 }
4461 4466
4462 4467 /*
4463 4468 * set the end of table flag for the last entry
4464 4469 */
4465 4470 if (idx == (sg_count - 1)) {
4466 4471 size |= PRDE_EOT;
4467 4472 }
4468 4473
4469 4474 nv_put32(sghdl, dstp++, size);
4470 4475 }
4471 4476
4472 4477 (void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4473 4478 sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4474 4479
4475 4480 nv_start_dma_engine(nvp, slot);
4476 4481
4477 4482 #ifdef NCQ
4478 4483 /*
4479 4484 * optimization: for SWNCQ, start DMA engine if this is the only
4480 4485 * command running. Preliminary NCQ efforts indicated this needs
4481 4486 * more debugging.
4482 4487 *
4483 4488 * if (nvp->nvp_ncq_run <= 1)
4484 4489 */
4485 4490
4486 4491 if (ncq == B_FALSE) {
4487 4492 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4488 4493 "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4489 4494 " cmd = %X", non_ncq_commands++, cmd);
4490 4495 nv_start_dma_engine(nvp, slot);
4491 4496 } else {
4492 4497 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "NCQ, so program "
4493 4498 "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd);
4494 4499 }
4495 4500 #endif /* NCQ */
4496 4501
4497 4502 return (SATA_TRAN_ACCEPTED);
4498 4503 }
4499 4504
4500 4505
4501 4506 /*
4502 4507 * start a PIO data-in ATA command
4503 4508 */
4504 4509 static int
4505 4510 nv_start_pio_in(nv_port_t *nvp, int slot)
4506 4511 {
4507 4512
4508 4513 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4509 4514 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4510 4515 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4511 4516
4512 4517 nv_program_taskfile_regs(nvp, slot);
4513 4518
4514 4519 /*
4515 4520 * This next one sets the drive in motion
4516 4521 */
4517 4522 nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4518 4523
4519 4524 return (SATA_TRAN_ACCEPTED);
4520 4525 }
4521 4526
4522 4527
4523 4528 /*
4524 4529 * start a PIO data-out ATA command
4525 4530 */
4526 4531 static int
4527 4532 nv_start_pio_out(nv_port_t *nvp, int slot)
4528 4533 {
4529 4534 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4530 4535 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4531 4536 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4532 4537
4533 4538 nv_program_taskfile_regs(nvp, slot);
4534 4539
4535 4540 /*
4536 4541 * this next one sets the drive in motion
4537 4542 */
4538 4543 nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4539 4544
4540 4545 /*
4541 4546 * wait for the busy bit to settle
4542 4547 */
4543 4548 NV_DELAY_NSEC(400);
4544 4549
4545 4550 /*
4546 4551 * wait for the drive to assert DRQ to send the first chunk
4547 4552 * of data. Have to busy wait because there's no interrupt for
4548 4553 * the first chunk. This is bad... uses a lot of cycles if the
4549 4554 * drive responds too slowly or if the wait loop granularity
4550 4555 * is too large. It's even worse if the drive is defective and
4551 4556 * the loop times out.
4552 4557 */
4553 4558 if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4554 4559 SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4555 4560 SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4556 4561 4000000, 0) == B_FALSE) {
4557 4562 spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4558 4563
4559 4564 goto error;
4560 4565 }
4561 4566
4562 4567 /*
4563 4568 * send the first block.
4564 4569 */
4565 4570 nv_intr_pio_out(nvp, nv_slotp);
4566 4571
4567 4572 /*
4568 4573 * If nvslot_flags is not set to COMPLETE yet, then processing
4569 4574 * is OK so far, so return. Otherwise, fall into error handling
4570 4575 * below.
4571 4576 */
4572 4577 if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4573 4578
4574 4579 return (SATA_TRAN_ACCEPTED);
4575 4580 }
4576 4581
4577 4582 error:
4578 4583 /*
4579 4584 * there was an error so reset the device and complete the packet.
4580 4585 */
4581 4586 nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4582 4587 nv_complete_io(nvp, spkt, 0);
4583 4588 nv_reset(nvp, "pio_out");
4584 4589
4585 4590 return (SATA_TRAN_PORT_ERROR);
4586 4591 }
4587 4592
4588 4593
4589 4594 /*
4590 4595 * start a ATAPI Packet command (PIO data in or out)
4591 4596 */
4592 4597 static int
4593 4598 nv_start_pkt_pio(nv_port_t *nvp, int slot)
4594 4599 {
4595 4600 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4596 4601 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4597 4602 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4598 4603 sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4599 4604
4600 4605 NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4601 4606 "nv_start_pkt_pio: start", NULL);
4602 4607
4603 4608 /*
4604 4609 * Write the PACKET command to the command register. Normally
4605 4610 * this would be done through nv_program_taskfile_regs(). It
4606 4611 * is done here because some values need to be overridden.
4607 4612 */
4608 4613
4609 4614 /* select the drive */
4610 4615 nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4611 4616
4612 4617 /* make certain the drive selected */
4613 4618 if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4614 4619 NV_SEC2USEC(5), 0) == B_FALSE) {
4615 4620 NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4616 4621 "nv_start_pkt_pio: drive select failed", NULL);
4617 4622 return (SATA_TRAN_PORT_ERROR);
4618 4623 }
4619 4624
4620 4625 /*
4621 4626 * The command is always sent via PIO, despite whatever the SATA
4622 4627 * common module sets in the command. Overwrite the DMA bit to do this.
4623 4628 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4624 4629 */
4625 4630 nv_put8(cmdhdl, nvp->nvp_feature, 0); /* deassert DMA and OVL */
4626 4631
4627 4632 /* set appropriately by the sata common module */
4628 4633 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4629 4634 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4630 4635 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4631 4636 nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4632 4637
4633 4638 /* initiate the command by writing the command register last */
4634 4639 nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4635 4640
4636 4641 /* Give the host controller time to do its thing */
4637 4642 NV_DELAY_NSEC(400);
4638 4643
4639 4644 /*
4640 4645 * Wait for the device to indicate that it is ready for the command
4641 4646 * ATAPI protocol state - HP0: Check_Status_A
4642 4647 */
4643 4648
4644 4649 if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4645 4650 SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4646 4651 SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4647 4652 4000000, 0) == B_FALSE) {
4648 4653 /*
4649 4654 * Either an error or device fault occurred or the wait
4650 4655 * timed out. According to the ATAPI protocol, command
4651 4656 * completion is also possible. Other implementations of
4652 4657 * this protocol don't handle this last case, so neither
4653 4658 * does this code.
4654 4659 */
4655 4660
4656 4661 if (nv_get8(cmdhdl, nvp->nvp_status) &
4657 4662 (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4658 4663 spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4659 4664
4660 4665 NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4661 4666 "nv_start_pkt_pio: device error (HP0)", NULL);
4662 4667 } else {
4663 4668 spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4664 4669
4665 4670 NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4666 4671 "nv_start_pkt_pio: timeout (HP0)", NULL);
4667 4672 }
4668 4673
4669 4674 nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4670 4675 nv_complete_io(nvp, spkt, 0);
4671 4676 nv_reset(nvp, "start_pkt_pio");
4672 4677
4673 4678 return (SATA_TRAN_PORT_ERROR);
4674 4679 }
4675 4680
4676 4681 /*
4677 4682 * Put the ATAPI command in the data register
4678 4683 * ATAPI protocol state - HP1: Send_Packet
4679 4684 */
4680 4685
4681 4686 ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4682 4687 (ushort_t *)nvp->nvp_data,
4683 4688 (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4684 4689
4685 4690 /*
4686 4691 * See you in nv_intr_pkt_pio.
4687 4692 * ATAPI protocol state - HP3: INTRQ_wait
4688 4693 */
4689 4694
4690 4695 NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4691 4696 "nv_start_pkt_pio: exiting into HP3", NULL);
4692 4697
4693 4698 return (SATA_TRAN_ACCEPTED);
4694 4699 }
4695 4700
4696 4701
4697 4702 /*
4698 4703 * Interrupt processing for a non-data ATA command.
4699 4704 */
4700 4705 static void
4701 4706 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4702 4707 {
4703 4708 uchar_t status;
4704 4709 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4705 4710 sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4706 4711 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4707 4712 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4708 4713
4709 4714 NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered", NULL);
4710 4715
4711 4716 status = nv_get8(cmdhdl, nvp->nvp_status);
4712 4717
4713 4718 /*
4714 4719 * check for errors
4715 4720 */
4716 4721 if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4717 4722 spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4718 4723 sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4719 4724 nvp->nvp_altstatus);
4720 4725 sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4721 4726 } else {
4722 4727 spkt->satapkt_reason = SATA_PKT_COMPLETED;
4723 4728 }
4724 4729
4725 4730 nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4726 4731 }
4727 4732
4728 4733
4729 4734 /*
4730 4735 * ATA command, PIO data in
4731 4736 */
4732 4737 static void
4733 4738 nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4734 4739 {
4735 4740 uchar_t status;
4736 4741 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4737 4742 sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4738 4743 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4739 4744 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4740 4745 int count;
4741 4746
4742 4747 status = nv_get8(cmdhdl, nvp->nvp_status);
4743 4748
4744 4749 if (status & SATA_STATUS_BSY) {
4745 4750 spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4746 4751 nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4747 4752 sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4748 4753 nvp->nvp_altstatus);
4749 4754 sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4750 4755 nv_reset(nvp, "intr_pio_in");
4751 4756
4752 4757 return;
4753 4758 }
4754 4759
4755 4760 /*
4756 4761 * check for errors
4757 4762 */
4758 4763 if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4759 4764 SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4760 4765 nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4761 4766 nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4762 4767 spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4763 4768
4764 4769 return;
4765 4770 }
4766 4771
4767 4772 /*
4768 4773 * read the next chunk of data (if any)
4769 4774 */
4770 4775 count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4771 4776
4772 4777 /*
4773 4778 * read count bytes
4774 4779 */
4775 4780 ASSERT(count != 0);
4776 4781
4777 4782 ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4778 4783 (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4779 4784
4780 4785 nv_slotp->nvslot_v_addr += count;
4781 4786 nv_slotp->nvslot_byte_count -= count;
4782 4787
4783 4788
4784 4789 if (nv_slotp->nvslot_byte_count != 0) {
4785 4790 /*
4786 4791 * more to transfer. Wait for next interrupt.
4787 4792 */
4788 4793 return;
4789 4794 }
4790 4795
4791 4796 /*
4792 4797 * transfer is complete. wait for the busy bit to settle.
4793 4798 */
4794 4799 NV_DELAY_NSEC(400);
4795 4800
4796 4801 spkt->satapkt_reason = SATA_PKT_COMPLETED;
4797 4802 nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4798 4803 }
4799 4804
4800 4805
/*
 * ATA command PIO data out
 *
 * Interrupt handler for the data-out phase of a PIO write command.
 * Reads the taskfile status register (which also clears the pending
 * interrupt), checks for error conditions, and either writes the next
 * chunk of data to the device's data register or marks the slot's
 * packet complete.
 */
static void
nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	uchar_t status;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int count;

	/*
	 * clear the IRQ (reading the status register has this side effect)
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	if (status & SATA_STATUS_BSY) {
		/*
		 * this should not happen: the device signalled an
		 * interrupt but is still busy.  Fail the packet as a
		 * timeout and capture status/error for the caller.
		 */
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);

		return;
	}

	/*
	 * check for errors (device fault or error bit set)
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		return;
	}

	/*
	 * this is the condition which signals the drive is
	 * no longer ready to transfer.  Likely that the transfer
	 * completed successfully, but check that byte_count is
	 * zero.
	 */
	if ((status & SATA_STATUS_DRQ) == 0) {

		if (nv_slotp->nvslot_byte_count == 0) {
			/*
			 * complete; successful transfer
			 */
			spkt->satapkt_reason = SATA_PKT_COMPLETED;
		} else {
			/*
			 * error condition, incomplete transfer: the device
			 * dropped DRQ before all requested bytes were sent
			 */
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		}
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

		return;
	}

	/*
	 * write the next chunk of data (at most one sector per interrupt)
	 */
	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);

	/*
	 * write count bytes as 16-bit words to the fixed data register
	 */

	ASSERT(count != 0);

	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);

	/* advance the transfer cursor past the bytes just written */
	nv_slotp->nvslot_v_addr += count;
	nv_slotp->nvslot_byte_count -= count;
}
4885 4890
4886 4891
/*
 * ATAPI PACKET command, PIO in/out interrupt
 *
 * Under normal circumstances, one of four different interrupt scenarios
 * will result in this function being called:
 *
 * 1. Packet command data transfer
 * 2. Packet command completion
 * 3. Request sense data transfer
 * 4. Request sense command completion
 *
 * Comments below reference the ATAPI host protocol (HP) states to show
 * where in the packet protocol each check falls.
 */
static void
nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	uint16_t ctlr_count;	/* byte count the device reports */
	int count;		/* bytes actually transferred */

	/* ATAPI protocol state - HP2: Check_Status_B */

	/*
	 * reading the status register also clears the pending interrupt
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);
	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_intr_pkt_pio: status 0x%x", status);

	if (status & SATA_STATUS_BSY) {
		/*
		 * BSY should not be set here.  If this was the request
		 * sense phase, fail the packet with a device error;
		 * otherwise treat it as a timeout and reset the port.
		 */
		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		} else {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_reset(nvp, "intr_pkt_pio");
		}

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: busy - status 0x%x", status);

		return;
	}

	if ((status & SATA_STATUS_DF) != 0) {
		/*
		 * On device fault, just clean up and bail.  Request sense
		 * will just default to its NO SENSE initialized value.
		 */

		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		}

		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
		    nvp->nvp_error);

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: device fault", NULL);

		return;
	}

	if ((status & SATA_STATUS_ERR) != 0) {
		/*
		 * On command error, figure out whether we are processing a
		 * request sense.  If so, clean up and bail.  Otherwise,
		 * do a REQUEST SENSE.
		 */

		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
			/* kick off a REQUEST SENSE to gather sense data */
			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
			    NV_FAILURE) {
				nv_copy_registers(nvp, &spkt->satapkt_device,
				    spkt);
				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
			}

			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
			    nvp->nvp_altstatus);
			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
			    nvp->nvp_error);
		} else {
			/* the request sense itself failed; give up */
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		}

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: error (status 0x%x)", status);

		return;
	}

	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
		/*
		 * REQUEST SENSE command processing
		 */

		if ((status & (SATA_STATUS_DRQ)) != 0) {
			/* ATAPI state - HP4: Transfer_Data */

			/*
			 * read the byte count from the controller
			 * (cylinder high:low holds the ATAPI byte count)
			 */
			ctlr_count =
			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: ctlr byte count - %d",
			    ctlr_count);

			if (ctlr_count == 0) {
				/* no data to transfer - some devices do this */

				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
				    "nv_intr_pkt_pio: done (no data)", NULL);

				return;
			}

			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);

			/* transfer the data into the private sense buffer */
			ddi_rep_get16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);

			/* consume residual bytes */
			ctlr_count -= count;

			if (ctlr_count > 0) {
				/* drain whatever the device still offers */
				for (; ctlr_count > 0; ctlr_count -= 2)
					(void) ddi_get16(cmdhdl,
					    (ushort_t *)nvp->nvp_data);
			}

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: transition to HP2", NULL);
		} else {
			/* still in ATAPI state - HP2 */

			/*
			 * In order to avoid clobbering the rqsense data
			 * set by the SATA common module, the sense data read
			 * from the device is put in a separate buffer and
			 * copied into the packet after the request sense
			 * command successfully completes.
			 */
			bcopy(nv_slotp->nvslot_rqsense_buff,
			    spkt->satapkt_cmd.satacmd_rqsense,
			    SATA_ATAPI_RQSENSE_LEN);

			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: request sense done", NULL);
		}

		return;
	}

	/*
	 * Normal command processing
	 */

	if ((status & (SATA_STATUS_DRQ)) != 0) {
		/* ATAPI protocol state - HP4: Transfer_Data */

		/* read the byte count from the controller */
		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);

		if (ctlr_count == 0) {
			/* no data to transfer - some devices do this */

			spkt->satapkt_reason = SATA_PKT_COMPLETED;
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: done (no data)", NULL);

			return;
		}

		/* never transfer more than the caller's buffer can hold */
		count = min(ctlr_count, nv_slotp->nvslot_byte_count);

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count);

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: byte_count 0x%x",
		    nv_slotp->nvslot_byte_count);

		/* transfer the data */

		if (direction == SATA_DIR_READ) {
			ddi_rep_get16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_v_addr,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);

			ctlr_count -= count;

			if (ctlr_count > 0) {
				/* consume remaining bytes */

				for (; ctlr_count > 0;
				    ctlr_count -= 2)
					(void) ddi_get16(cmdhdl,
					    (ushort_t *)nvp->nvp_data);

				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
				    "nv_intr_pkt_pio: bytes remained", NULL);
			}
		} else {
			ddi_rep_put16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_v_addr,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);
		}

		/* advance the transfer cursor */
		nv_slotp->nvslot_v_addr += count;
		nv_slotp->nvslot_byte_count -= count;

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: transition to HP2", NULL);
	} else {
		/* still in ATAPI state - HP2 */

		spkt->satapkt_reason = SATA_PKT_COMPLETED;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: done", NULL);
	}
}
5137 5142
5138 5143
/*
 * ATA command, DMA data in/out
 *
 * Interrupt handler for DMA command completion.  Stops the bus master
 * DMA engine, reads the taskfile status (clearing the interrupt), and
 * checks both drive and bus master error status before completing the
 * packet.
 */
static void
nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	uchar_t bmicx;
	uchar_t bm_status;

	/* the slot is done regardless of which exit path is taken below */
	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

	/*
	 * stop DMA engine (clear the start/stop bit in the bus master
	 * command register, preserving the other bits).
	 */
	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);

	/*
	 * get the status and clear the IRQ, and check for DMA error
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	/*
	 * check for drive errors
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		/* still clear the bus master status; result is irrelevant */
		(void) nv_bm_status_clear(nvp);

		return;
	}

	bm_status = nv_bm_status_clear(nvp);

	/*
	 * check for bus master errors
	 */

	if (bm_status & BMISX_IDERR) {
		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
		nv_reset(nvp, "intr_dma");

		return;
	}

	spkt->satapkt_reason = SATA_PKT_COMPLETED;
}
5196 5201
5197 5202
5198 5203 /*
5199 5204 * Wait for a register of a controller to achieve a specific state.
5200 5205 * To return normally, all the bits in the first sub-mask must be ON,
5201 5206 * all the bits in the second sub-mask must be OFF.
5202 5207 * If timeout_usec microseconds pass without the controller achieving
5203 5208 * the desired bit configuration, return TRUE, else FALSE.
5204 5209 *
5205 5210 * hybrid waiting algorithm: if not in interrupt context, busy looping will
5206 5211 * occur for the first 250 us, then switch over to a sleeping wait.
5207 5212 *
5208 5213 */
5209 5214 int
5210 5215 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
5211 5216 int type_wait)
5212 5217 {
5213 5218 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5214 5219 hrtime_t end, cur, start_sleep, start;
5215 5220 int first_time = B_TRUE;
5216 5221 ushort_t val;
5217 5222
5218 5223 for (;;) {
5219 5224 val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5220 5225
5221 5226 if ((val & onbits) == onbits && (val & offbits) == 0) {
5222 5227
5223 5228 return (B_TRUE);
5224 5229 }
5225 5230
5226 5231 cur = gethrtime();
5227 5232
5228 5233 /*
5229 5234 * store the start time and calculate the end
5230 5235 * time. also calculate "start_sleep" which is
5231 5236 * the point after which the driver will stop busy
5232 5237 * waiting and change to sleep waiting.
5233 5238 */
5234 5239 if (first_time) {
5235 5240 first_time = B_FALSE;
5236 5241 /*
5237 5242 * start and end are in nanoseconds
5238 5243 */
5239 5244 start = cur;
5240 5245 end = start + timeout_usec * 1000;
5241 5246 /*
5242 5247 * add 1 ms to start
5243 5248 */
5244 5249 start_sleep = start + 250000;
5245 5250
5246 5251 if (servicing_interrupt()) {
5247 5252 type_wait = NV_NOSLEEP;
5248 5253 }
5249 5254 }
5250 5255
5251 5256 if (cur > end) {
5252 5257
5253 5258 break;
5254 5259 }
5255 5260
5256 5261 if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5257 5262 #if ! defined(__lock_lint)
5258 5263 delay(1);
5259 5264 #endif
5260 5265 } else {
5261 5266 drv_usecwait(nv_usec_delay);
5262 5267 }
5263 5268 }
5264 5269
5265 5270 return (B_FALSE);
5266 5271 }
5267 5272
5268 5273
/*
 * This is a slightly more complicated version that checks
 * for error conditions and bails-out rather than looping
 * until the timeout is exceeded.
 *
 * Returns B_TRUE when all bits in onbits1 are set and all bits in
 * offbits1 are clear.  Returns B_FALSE early if either of the two
 * failure bit patterns is observed, or when timeout_usec elapses.
 *
 * hybrid waiting algorithm: if not in interrupt context, busy looping will
 * occur for the first 250 us, then switch over to a sleeping wait.
 */
int
nv_wait3(
	nv_port_t *nvp,
	uchar_t onbits1,
	uchar_t offbits1,
	uchar_t failure_onbits2,
	uchar_t failure_offbits2,
	uchar_t failure_onbits3,
	uchar_t failure_offbits3,
	uint_t timeout_usec,
	int type_wait)
{
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	hrtime_t end, cur, start_sleep, start;
	int first_time = B_TRUE;
	ushort_t val;

	for (;;) {
		val = nv_get8(ctlhdl, nvp->nvp_altstatus);

		/*
		 * check for expected condition
		 */
		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {

			return (B_TRUE);
		}

		/*
		 * check for error conditions
		 */
		if ((val & failure_onbits2) == failure_onbits2 &&
		    (val & failure_offbits2) == 0) {

			return (B_FALSE);
		}

		if ((val & failure_onbits3) == failure_onbits3 &&
		    (val & failure_offbits3) == 0) {

			return (B_FALSE);
		}

		/*
		 * store the start time and calculate the end
		 * time.  also calculate "start_sleep" which is
		 * the point after which the driver will stop busy
		 * waiting and change to sleep waiting.
		 */
		if (first_time) {
			first_time = B_FALSE;
			/*
			 * start and end are in nanoseconds
			 */
			cur = start = gethrtime();
			end = start + timeout_usec * 1000;
			/*
			 * busy wait for the first 250 us, then sleep
			 */
			start_sleep = start + 250000;

			if (servicing_interrupt()) {
				/* sleeping is not allowed in interrupt ctx */
				type_wait = NV_NOSLEEP;
			}
		} else {
			cur = gethrtime();
		}

		if (cur > end) {

			break;
		}

		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
#if ! defined(__lock_lint)
			delay(1);
#endif
		} else {
			drv_usecwait(nv_usec_delay);
		}
	}

	return (B_FALSE);
}
5361 5366
5362 5367
5363 5368 /*
5364 5369 * nv_port_state_change() reports the state of the port to the
5365 5370 * sata module by calling sata_hba_event_notify(). This
5366 5371 * function is called any time the state of the port is changed
5367 5372 */
5368 5373 static void
5369 5374 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
5370 5375 {
5371 5376 sata_device_t sd;
5372 5377
5373 5378 NVLOG(NVDBG_EVENT, nvp->nvp_ctlp, nvp,
5374 5379 "nv_port_state_change: event 0x%x type 0x%x state 0x%x "
5375 5380 "lbolt %ld (ticks)", event, addr_type, state, ddi_get_lbolt());
5376 5381
5377 5382 if (ddi_in_panic() != 0) {
5378 5383
5379 5384 return;
5380 5385 }
5381 5386
5382 5387 bzero((void *)&sd, sizeof (sata_device_t));
5383 5388 sd.satadev_rev = SATA_DEVICE_REV;
5384 5389 nv_copy_registers(nvp, &sd, NULL);
5385 5390
5386 5391 /*
5387 5392 * When NCQ is implemented sactive and snotific field need to be
5388 5393 * updated.
5389 5394 */
5390 5395 sd.satadev_addr.cport = nvp->nvp_port_num;
5391 5396 sd.satadev_addr.qual = addr_type;
5392 5397 sd.satadev_state = state;
5393 5398
5394 5399 sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
5395 5400 }
5396 5401
5397 5402
/*
 * Monitor reset progress and signature gathering.
 *
 * Called with nvp_mutex held (asserted below).  The return value is
 * handed back to the caller as the next polling interval (the same
 * units as nvp_wait_sig; the caller stores it into next_timeout_us):
 * a non-zero value means "poll again after this long", while 0 means
 * reset processing is finished (success, failure, or device removal).
 */
static clock_t
nv_monitor_reset(nv_port_t *nvp)
{
	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
	uint32_t sstatus;

	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));

	sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);

	/*
	 * Check the link status.  The link needs to be active before
	 * checking the link's status.
	 */
	if ((SSTATUS_GET_IPM(sstatus) != SSTATUS_IPM_ACTIVE) ||
	    (SSTATUS_GET_DET(sstatus) != SSTATUS_DET_DEVPRE_PHYCOM)) {
		/*
		 * Either link is not active or there is no device
		 * If the link remains down for more than NV_LINK_EVENT_DOWN
		 * (milliseconds), abort signature acquisition and complete
		 * reset processing.  The link will go down when COMRESET is
		 * sent by nv_reset().
		 */

		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
		    NV_LINK_EVENT_DOWN) {

			nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
			    "nv_monitor_reset: no link - ending signature "
			    "acquisition; time after reset %ldms",
			    TICK_TO_MSEC(ddi_get_lbolt() -
			    nvp->nvp_reset_time));

			DTRACE_PROBE(no_link_reset_giving_up_f);

			/*
			 * If the drive was previously present and configured
			 * and then subsequently removed, then send a removal
			 * event to sata common module.
			 */
			if (nvp->nvp_type != SATA_DTYPE_NONE) {
				nv_port_state_change(nvp,
				    SATA_EVNT_DEVICE_DETACHED,
				    SATA_ADDR_CPORT, 0);
			}

			nvp->nvp_type = SATA_DTYPE_NONE;
			nvp->nvp_signature = NV_NO_SIG;
			/*
			 * NOTE(review): this clears NV_DEACTIVATED but not
			 * NV_RESET or the hotplug/attach flags on this
			 * path - confirm that is intentional.
			 */
			nvp->nvp_state &= ~(NV_DEACTIVATED);

#ifdef SGPIO_SUPPORT
			nv_sgp_drive_disconnect(nvp->nvp_ctlp,
			    SGP_CTLR_PORT_TO_DRV(
			    nvp->nvp_ctlp->nvc_ctlr_num,
			    nvp->nvp_port_num));
#endif

			/* wake any thread waiting for reset completion */
			cv_signal(&nvp->nvp_reset_cv);

			return (0);
		}

		DTRACE_PROBE(link_lost_reset_keep_trying_p);

		return (nvp->nvp_wait_sig);
	}

	NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
	    "nv_monitor_reset: link up.  time since reset %ldms",
	    TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time));

	/* attempt to read the device signature; updates nvp_signature */
	nv_read_signature(nvp);


	if (nvp->nvp_signature != NV_NO_SIG) {
		/*
		 * signature has been acquired, send the appropriate
		 * event to the sata common module.
		 */
		if (nvp->nvp_state & (NV_ATTACH|NV_HOTPLUG)) {
			char *source;

			if (nvp->nvp_state & NV_HOTPLUG) {

				source = "hotplugged";
				nv_port_state_change(nvp,
				    SATA_EVNT_DEVICE_ATTACHED,
				    SATA_ADDR_CPORT, SATA_DSTATE_PWR_ACTIVE);
				DTRACE_PROBE1(got_sig_for_hotplugged_device_h,
				    int, nvp->nvp_state);

			} else {
				source = "activated or attached";
				DTRACE_PROBE1(got_sig_for_existing_device_h,
				    int, nvp->nvp_state);
			}

			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
			    "signature acquired for %s device. sig:"
			    " 0x%x state: 0x%x nvp_type: 0x%x", source,
			    nvp->nvp_signature, nvp->nvp_state, nvp->nvp_type);


			nvp->nvp_state &= ~(NV_RESET|NV_ATTACH|NV_HOTPLUG);

#ifdef SGPIO_SUPPORT
			/* update activity LED state for the drive */
			if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
				nv_sgp_drive_connect(nvp->nvp_ctlp,
				    SGP_CTLR_PORT_TO_DRV(
				    nvp->nvp_ctlp->nvc_ctlr_num,
				    nvp->nvp_port_num));
			} else {
				nv_sgp_drive_disconnect(nvp->nvp_ctlp,
				    SGP_CTLR_PORT_TO_DRV(
				    nvp->nvp_ctlp->nvc_ctlr_num,
				    nvp->nvp_port_num));
			}
#endif

			cv_signal(&nvp->nvp_reset_cv);

			return (0);
		}

		/*
		 * Since this was not an attach, it was a reset of an
		 * existing device
		 */
		nvp->nvp_state &= ~NV_RESET;
		nvp->nvp_state |= NV_RESTORE;



		DTRACE_PROBE(got_signature_reset_complete_p);
		DTRACE_PROBE1(nvp_signature_h, int, nvp->nvp_signature);
		DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);

		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
		    "signature acquired reset complete. sig: 0x%x"
		    " state: 0x%x", nvp->nvp_signature, nvp->nvp_state);

		/*
		 * interrupts may have been disabled so just make sure
		 * they are cleared and re-enabled.
		 */

		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);

		nv_port_state_change(nvp, SATA_EVNT_DEVICE_RESET,
		    SATA_ADDR_DCPORT,
		    SATA_DSTATE_RESET | SATA_DSTATE_PWR_ACTIVE);

		return (0);
	}


	/*
	 * No signature yet.  If the retry interval has elapsed, either
	 * fail the port (retry budget exhausted) or issue another reset.
	 */
	if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >
	    NV_RETRY_RESET_SIG) {


		if (nvp->nvp_reset_retry_count >= NV_MAX_RESET_RETRY) {

			nvp->nvp_state |= NV_FAILED;
			nvp->nvp_state &= ~(NV_RESET|NV_ATTACH|NV_HOTPLUG);

			DTRACE_PROBE(reset_exceeded_waiting_for_sig_p);
			DTRACE_PROBE(reset_exceeded_waiting_for_sig_f);
			DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);
			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
			    "reset time exceeded waiting for sig nvp_state %x",
			    nvp->nvp_state);

			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
			    SATA_ADDR_CPORT, 0);

			cv_signal(&nvp->nvp_reset_cv);

			return (0);
		}

		nv_reset(nvp, "retry");

		return (nvp->nvp_wait_sig);
	}

	/*
	 * signature not received, keep trying
	 */
	DTRACE_PROBE(no_sig_keep_waiting_p);

	/*
	 * double the wait time for sig since the last try but cap it off at
	 * 1 second.
	 */
	nvp->nvp_wait_sig = nvp->nvp_wait_sig * 2;

	return (nvp->nvp_wait_sig > NV_ONE_SEC ? NV_ONE_SEC :
	    nvp->nvp_wait_sig);
}
5601 5606
5602 5607
5603 5608 /*
5604 5609 * timeout processing:
5605 5610 *
5606 5611 * Check if any packets have crossed a timeout threshold. If so,
5607 5612 * abort the packet. This function is not NCQ-aware.
5608 5613 *
5609 5614 * If reset is in progress, call reset monitoring function.
5610 5615 *
5611 5616 * Timeout frequency may be lower for checking packet timeout
5612 5617 * and higher for reset monitoring.
5613 5618 *
5614 5619 */
5615 5620 static void
5616 5621 nv_timeout(void *arg)
5617 5622 {
5618 5623 nv_port_t *nvp = arg;
5619 5624 nv_slot_t *nv_slotp;
5620 5625 clock_t next_timeout_us = NV_ONE_SEC;
5621 5626 uint16_t int_status;
5622 5627 uint8_t status, bmstatus;
5623 5628 static int intr_warn_once = 0;
5624 5629 uint32_t serror;
5625 5630
5626 5631
5627 5632 ASSERT(nvp != NULL);
5628 5633
5629 5634 mutex_enter(&nvp->nvp_mutex);
5630 5635 nvp->nvp_timeout_id = 0;
5631 5636
5632 5637 if (nvp->nvp_state & (NV_DEACTIVATED|NV_FAILED)) {
5633 5638 next_timeout_us = 0;
5634 5639
5635 5640 goto finished;
5636 5641 }
5637 5642
5638 5643 if (nvp->nvp_state & NV_RESET) {
5639 5644 next_timeout_us = nv_monitor_reset(nvp);
5640 5645
5641 5646 goto finished;
5642 5647 }
5643 5648
5644 5649 if (nvp->nvp_state & NV_LINK_EVENT) {
5645 5650 boolean_t device_present = B_FALSE;
5646 5651 uint32_t sstatus;
5647 5652 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5648 5653
5649 5654 if (TICK_TO_USEC(ddi_get_lbolt() -
5650 5655 nvp->nvp_link_event_time) < NV_LINK_EVENT_SETTLE) {
5651 5656
5652 5657 next_timeout_us = 10 * NV_ONE_MSEC;
5653 5658
5654 5659 DTRACE_PROBE(link_event_set_no_timeout_keep_waiting_p);
5655 5660
5656 5661 goto finished;
5657 5662 }
5658 5663
5659 5664 DTRACE_PROBE(link_event_settled_now_process_p);
5660 5665
5661 5666 nvp->nvp_state &= ~NV_LINK_EVENT;
5662 5667
5663 5668 /*
5664 5669 * ck804 routinely reports the wrong hotplug/unplug event,
5665 5670 * and it's been seen on mcp55 when there are signal integrity
5666 5671 * issues. Therefore need to infer the event from the
5667 5672 * current link status.
5668 5673 */
5669 5674
5670 5675 sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5671 5676
5672 5677 if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
5673 5678 (SSTATUS_GET_DET(sstatus) ==
5674 5679 SSTATUS_DET_DEVPRE_PHYCOM)) {
5675 5680 device_present = B_TRUE;
5676 5681 }
5677 5682
5678 5683 if ((nvp->nvp_signature != NV_NO_SIG) &&
5679 5684 (device_present == B_FALSE)) {
5680 5685
5681 5686 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5682 5687 "nv_timeout: device detached", NULL);
5683 5688
5684 5689 DTRACE_PROBE(device_detached_p);
5685 5690
5686 5691 (void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
5687 5692 B_FALSE);
5688 5693
5689 5694 nv_port_state_change(nvp, SATA_EVNT_DEVICE_DETACHED,
5690 5695 SATA_ADDR_CPORT, 0);
5691 5696
5692 5697 nvp->nvp_signature = NV_NO_SIG;
5693 5698 nvp->nvp_rem_time = ddi_get_lbolt();
5694 5699 nvp->nvp_type = SATA_DTYPE_NONE;
5695 5700 next_timeout_us = 0;
5696 5701
5697 5702 #ifdef SGPIO_SUPPORT
5698 5703 nv_sgp_drive_disconnect(nvp->nvp_ctlp,
5699 5704 SGP_CTLR_PORT_TO_DRV(nvp->nvp_ctlp->nvc_ctlr_num,
5700 5705 nvp->nvp_port_num));
5701 5706 #endif
5702 5707
5703 5708 goto finished;
5704 5709 }
5705 5710
5706 5711 /*
5707 5712 * if the device was already present, and it's still present,
5708 5713 * then abort any outstanding command and issue a reset.
5709 5714 * This may result from transient link errors.
5710 5715 */
5711 5716
5712 5717 if ((nvp->nvp_signature != NV_NO_SIG) &&
5713 5718 (device_present == B_TRUE)) {
5714 5719
5715 5720 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5716 5721 "nv_timeout: spurious link event", NULL);
5717 5722 DTRACE_PROBE(spurious_link_event_p);
5718 5723
5719 5724 (void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
5720 5725 B_FALSE);
5721 5726
5722 5727 nvp->nvp_signature = NV_NO_SIG;
5723 5728 nvp->nvp_trans_link_time = ddi_get_lbolt();
5724 5729 nvp->nvp_trans_link_count++;
5725 5730 next_timeout_us = 0;
5726 5731
5727 5732 nv_reset(nvp, "transient link event");
5728 5733
5729 5734 goto finished;
5730 5735 }
5731 5736
5732 5737
5733 5738 /*
5734 5739 * a new device has been inserted
5735 5740 */
5736 5741 if ((nvp->nvp_signature == NV_NO_SIG) &&
5737 5742 (device_present == B_TRUE)) {
5738 5743 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5739 5744 "nv_timeout: device attached", NULL);
5740 5745
5741 5746 DTRACE_PROBE(device_attached_p);
5742 5747 nvp->nvp_add_time = ddi_get_lbolt();
5743 5748 next_timeout_us = 0;
5744 5749 nvp->nvp_reset_count = 0;
5745 5750 nvp->nvp_state = NV_HOTPLUG;
5746 5751 nvp->nvp_type = SATA_DTYPE_UNKNOWN;
5747 5752 nv_reset(nvp, "hotplug");
5748 5753
5749 5754 goto finished;
5750 5755 }
5751 5756
5752 5757 /*
5753 5758 * no link, and no prior device. Nothing to do, but
5754 5759 * log this.
5755 5760 */
5756 5761 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5757 5762 "nv_timeout: delayed hot processing no link no prior"
5758 5763 " device", NULL);
5759 5764 DTRACE_PROBE(delayed_hotplug_no_link_no_prior_device_p);
5760 5765
5761 5766 nvp->nvp_trans_link_time = ddi_get_lbolt();
5762 5767 nvp->nvp_trans_link_count++;
5763 5768 next_timeout_us = 0;
5764 5769
5765 5770 goto finished;
5766 5771 }
5767 5772
5768 5773 /*
5769 5774 * Not yet NCQ-aware - there is only one command active.
5770 5775 */
5771 5776 nv_slotp = &(nvp->nvp_slot[0]);
5772 5777
5773 5778 /*
5774 5779 * perform timeout checking and processing only if there is an
5775 5780 * active packet on the port
5776 5781 */
5777 5782 if (nv_slotp != NULL && nv_slotp->nvslot_spkt != NULL) {
5778 5783 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5779 5784 sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5780 5785 uint8_t cmd = satacmd->satacmd_cmd_reg;
5781 5786 uint64_t lba;
5782 5787
5783 5788 #if ! defined(__lock_lint) && defined(DEBUG)
5784 5789
5785 5790 lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5786 5791 ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5787 5792 ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5788 5793 ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5789 5794 ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5790 5795 ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5791 5796 #endif
5792 5797
5793 5798 /*
5794 5799 * timeout not needed if there is a polling thread
5795 5800 */
5796 5801 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5797 5802 next_timeout_us = 0;
5798 5803
5799 5804 goto finished;
5800 5805 }
5801 5806
5802 5807 if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5803 5808 spkt->satapkt_time) {
5804 5809
5805 5810 serror = nv_get32(nvp->nvp_ctlp->nvc_bar_hdl[5],
5806 5811 nvp->nvp_serror);
5807 5812 status = nv_get8(nvp->nvp_ctl_hdl,
5808 5813 nvp->nvp_altstatus);
5809 5814 bmstatus = nv_get8(nvp->nvp_bm_hdl,
5810 5815 nvp->nvp_bmisx);
5811 5816
5812 5817 nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
5813 5818 "nv_timeout: aborting: "
5814 5819 "nvslot_stime: %ld max ticks till timeout: %ld "
5815 5820 "cur_time: %ld cmd = 0x%x lba = %d seq = %d",
5816 5821 nv_slotp->nvslot_stime,
5817 5822 drv_usectohz(MICROSEC *
5818 5823 spkt->satapkt_time), ddi_get_lbolt(),
5819 5824 cmd, lba, nvp->nvp_seq);
5820 5825
5821 5826 NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5822 5827 "nv_timeout: altstatus = 0x%x bmicx = 0x%x "
5823 5828 "serror = 0x%x previous_cmd = "
5824 5829 "0x%x", status, bmstatus, serror,
5825 5830 nvp->nvp_previous_cmd);
5826 5831
5827 5832
5828 5833 DTRACE_PROBE1(nv_timeout_packet_p, int, nvp);
5829 5834
5830 5835 if (nvp->nvp_mcp5x_int_status != NULL) {
5831 5836
5832 5837 int_status = nv_get16(
5833 5838 nvp->nvp_ctlp->nvc_bar_hdl[5],
5834 5839 nvp->nvp_mcp5x_int_status);
5835 5840 NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5836 5841 "int_status = 0x%x", int_status);
5837 5842
5838 5843 if (int_status & MCP5X_INT_COMPLETE) {
5839 5844 /*
5840 5845 * Completion interrupt was missed.
5841 5846 * Issue warning message once.
5842 5847 */
5843 5848 if (!intr_warn_once) {
5844 5849
5845 5850 nv_cmn_err(CE_WARN,
5846 5851 nvp->nvp_ctlp,
5847 5852 nvp,
5848 5853 "nv_sata: missing command "
5849 5854 "completion interrupt");
5850 5855 intr_warn_once = 1;
5851 5856
5852 5857 }
5853 5858
5854 5859 NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp,
5855 5860 nvp, "timeout detected with "
5856 5861 "interrupt ready - calling "
5857 5862 "int directly", NULL);
5858 5863
5859 5864 mutex_exit(&nvp->nvp_mutex);
5860 5865 (void) mcp5x_intr_port(nvp);
5861 5866 mutex_enter(&nvp->nvp_mutex);
5862 5867
5863 5868 } else {
5864 5869 /*
5865 5870 * True timeout and not a missing
5866 5871 * interrupt.
5867 5872 */
5868 5873 DTRACE_PROBE1(timeout_abort_active_p,
5869 5874 int *, nvp);
5870 5875 (void) nv_abort_active(nvp, spkt,
5871 5876 SATA_PKT_TIMEOUT, B_TRUE);
5872 5877 }
5873 5878 } else {
5874 5879 (void) nv_abort_active(nvp, spkt,
5875 5880 SATA_PKT_TIMEOUT, B_TRUE);
5876 5881 }
5877 5882
5878 5883 } else {
5879 5884 NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5880 5885 "nv_timeout:"
5881 5886 " still in use so restarting timeout",
5882 5887 NULL);
5883 5888
5884 5889 next_timeout_us = NV_ONE_SEC;
5885 5890 }
5886 5891 } else {
5887 5892 /*
5888 5893 * there was no active packet, so do not re-enable timeout
5889 5894 */
5890 5895 next_timeout_us = 0;
5891 5896 NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5892 5897 "nv_timeout: no active packet so not re-arming "
5893 5898 "timeout", NULL);
5894 5899 }
5895 5900
5896 5901 finished:
5897 5902
5898 5903 nv_setup_timeout(nvp, next_timeout_us);
5899 5904
5900 5905 mutex_exit(&nvp->nvp_mutex);
5901 5906 }
5902 5907
5903 5908
5904 5909 /*
5905 5910 * enable or disable the 3 interrupt types the driver is
5906 5911 * interested in: completion, add and remove.
5907 5912 */
static void
ck804_set_intr(nv_port_t *nvp, int flag)
{
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uchar_t *bar5 = nvc->nvc_bar_addr[5];
	/* per-port hotplug + command-completion interrupt enable bits */
	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
	/* per-port masks that acknowledge all pending interrupt status */
	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
	uint8_t int_en, port = nvp->nvp_port_num, intr_status;

	/*
	 * Non-blocking disable: mask this port's interrupt enable bits
	 * without taking any mutex.  Used from contexts (such as
	 * nv_quiesce()) where blocking is not permitted.
	 */
	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
		int_en = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
		int_en &= ~intr_bits[port];
		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
		    int_en);
		return;
	}

	ASSERT(mutex_owned(&nvp->nvp_mutex));

	/*
	 * controller level lock also required since access to an 8-bit
	 * interrupt register is shared between both channels.
	 */
	mutex_enter(&nvc->nvc_mutex);

	if (flag & NV_INTR_CLEAR_ALL) {
		NVLOG(NVDBG_INTR, nvc, nvp,
		    "ck804_set_intr: NV_INTR_CLEAR_ALL", NULL);

		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
		    (uint8_t *)(nvc->nvc_ck804_int_status));

		/*
		 * write back only this port's status bits to acknowledge
		 * them without disturbing the other channel's state
		 */
		if (intr_status & clear_all_bits[port]) {

			nv_put8(nvc->nvc_bar_hdl[5],
			    (uint8_t *)(nvc->nvc_ck804_int_status),
			    clear_all_bits[port]);

			NVLOG(NVDBG_INTR, nvc, nvp,
			    "interrupt bits cleared %x",
			    intr_status & clear_all_bits[port]);
		}
	}

	if (flag & NV_INTR_DISABLE) {
		NVLOG(NVDBG_INTR, nvc, nvp,
		    "ck804_set_intr: NV_INTR_DISABLE", NULL);
		/* read-modify-write: clear only this port's enable bits */
		int_en = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
		int_en &= ~intr_bits[port];
		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
		    int_en);
	}

	if (flag & NV_INTR_ENABLE) {
		NVLOG(NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE",
		    NULL);
		/* read-modify-write: set only this port's enable bits */
		int_en = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
		int_en |= intr_bits[port];
		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
		    int_en);
	}

	mutex_exit(&nvc->nvc_mutex);
}
5977 5982
5978 5983
5979 5984 /*
5980 5985 * enable or disable the 3 interrupts the driver is interested in:
5981 5986 * completion interrupt, hot add, and hot remove interrupt.
5982 5987 */
5983 5988 static void
5984 5989 mcp5x_set_intr(nv_port_t *nvp, int flag)
5985 5990 {
5986 5991 nv_ctl_t *nvc = nvp->nvp_ctlp;
5987 5992 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5988 5993 uint16_t intr_bits =
5989 5994 MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5990 5995 uint16_t int_en;
5991 5996
5992 5997 if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5993 5998 int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5994 5999 int_en &= ~intr_bits;
5995 6000 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5996 6001 return;
5997 6002 }
5998 6003
5999 6004 ASSERT(mutex_owned(&nvp->nvp_mutex));
6000 6005
6001 6006 NVLOG(NVDBG_INTR, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag);
6002 6007
6003 6008 if (flag & NV_INTR_CLEAR_ALL) {
6004 6009 NVLOG(NVDBG_INTR, nvc, nvp,
6005 6010 "mcp5x_set_intr: NV_INTR_CLEAR_ALL", NULL);
6006 6011 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
6007 6012 }
6008 6013
6009 6014 if (flag & NV_INTR_ENABLE) {
6010 6015 NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE",
6011 6016 NULL);
6012 6017 int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
6013 6018 int_en |= intr_bits;
6014 6019 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6015 6020 }
6016 6021
6017 6022 if (flag & NV_INTR_DISABLE) {
6018 6023 NVLOG(NVDBG_INTR, nvc, nvp,
6019 6024 "mcp5x_set_intr: NV_INTR_DISABLE", NULL);
6020 6025 int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
6021 6026 int_en &= ~intr_bits;
6022 6027 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6023 6028 }
6024 6029 }
6025 6030
6026 6031
6027 6032 static void
6028 6033 nv_resume(nv_port_t *nvp)
6029 6034 {
6030 6035 NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()", NULL);
6031 6036
6032 6037 mutex_enter(&nvp->nvp_mutex);
6033 6038
6034 6039 if (nvp->nvp_state & NV_DEACTIVATED) {
6035 6040 mutex_exit(&nvp->nvp_mutex);
6036 6041
6037 6042 return;
6038 6043 }
6039 6044
6040 6045 /* Enable interrupt */
6041 6046 (*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
6042 6047
6043 6048 /*
6044 6049 * Power may have been removed to the port and the
6045 6050 * drive, and/or a drive may have been added or removed.
6046 6051 * Force a reset which will cause a probe and re-establish
6047 6052 * any state needed on the drive.
6048 6053 */
6049 6054 nv_reset(nvp, "resume");
6050 6055
6051 6056 mutex_exit(&nvp->nvp_mutex);
6052 6057 }
6053 6058
6054 6059
6055 6060 static void
6056 6061 nv_suspend(nv_port_t *nvp)
6057 6062 {
6058 6063 NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()", NULL);
6059 6064
6060 6065 mutex_enter(&nvp->nvp_mutex);
6061 6066
6062 6067 #ifdef SGPIO_SUPPORT
6063 6068 if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
6064 6069 nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
6065 6070 nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
6066 6071 }
6067 6072 #endif
6068 6073
6069 6074 if (nvp->nvp_state & NV_DEACTIVATED) {
6070 6075 mutex_exit(&nvp->nvp_mutex);
6071 6076
6072 6077 return;
6073 6078 }
6074 6079
6075 6080 /*
6076 6081 * Stop the timeout handler.
6077 6082 * (It will be restarted in nv_reset() during nv_resume().)
6078 6083 */
6079 6084 if (nvp->nvp_timeout_id) {
6080 6085 (void) untimeout(nvp->nvp_timeout_id);
6081 6086 nvp->nvp_timeout_id = 0;
6082 6087 }
6083 6088
6084 6089 /* Disable interrupt */
6085 6090 (*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
6086 6091 NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
6087 6092
6088 6093 mutex_exit(&nvp->nvp_mutex);
6089 6094 }
6090 6095
6091 6096
6092 6097 static void
6093 6098 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
6094 6099 {
6095 6100 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6096 6101 sata_cmd_t *scmd = &spkt->satapkt_cmd;
6097 6102 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
6098 6103 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6099 6104 uchar_t status;
6100 6105 struct sata_cmd_flags flags;
6101 6106
6102 6107 sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6103 6108 sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
6104 6109 sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6105 6110
6106 6111 if (spkt == NULL) {
6107 6112
6108 6113 return;
6109 6114 }
6110 6115
6111 6116 /*
6112 6117 * in the error case, implicitly set the return of regs needed
6113 6118 * for error handling.
6114 6119 */
6115 6120 status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
6116 6121 nvp->nvp_altstatus);
6117 6122
6118 6123 flags = scmd->satacmd_flags;
6119 6124
6120 6125 if (status & SATA_STATUS_ERR) {
6121 6126 flags.sata_copy_out_lba_low_msb = B_TRUE;
6122 6127 flags.sata_copy_out_lba_mid_msb = B_TRUE;
6123 6128 flags.sata_copy_out_lba_high_msb = B_TRUE;
6124 6129 flags.sata_copy_out_lba_low_lsb = B_TRUE;
6125 6130 flags.sata_copy_out_lba_mid_lsb = B_TRUE;
6126 6131 flags.sata_copy_out_lba_high_lsb = B_TRUE;
6127 6132 flags.sata_copy_out_error_reg = B_TRUE;
6128 6133 flags.sata_copy_out_sec_count_msb = B_TRUE;
6129 6134 flags.sata_copy_out_sec_count_lsb = B_TRUE;
6130 6135 scmd->satacmd_status_reg = status;
6131 6136 }
6132 6137
6133 6138 if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
6134 6139
6135 6140 /*
6136 6141 * set HOB so that high byte will be read
6137 6142 */
6138 6143 nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
6139 6144
6140 6145 /*
6141 6146 * get the requested high bytes
6142 6147 */
6143 6148 if (flags.sata_copy_out_sec_count_msb) {
6144 6149 scmd->satacmd_sec_count_msb =
6145 6150 nv_get8(cmdhdl, nvp->nvp_count);
6146 6151 }
6147 6152
6148 6153 if (flags.sata_copy_out_lba_low_msb) {
6149 6154 scmd->satacmd_lba_low_msb =
6150 6155 nv_get8(cmdhdl, nvp->nvp_sect);
6151 6156 }
6152 6157
6153 6158 if (flags.sata_copy_out_lba_mid_msb) {
6154 6159 scmd->satacmd_lba_mid_msb =
6155 6160 nv_get8(cmdhdl, nvp->nvp_lcyl);
6156 6161 }
6157 6162
6158 6163 if (flags.sata_copy_out_lba_high_msb) {
6159 6164 scmd->satacmd_lba_high_msb =
6160 6165 nv_get8(cmdhdl, nvp->nvp_hcyl);
6161 6166 }
6162 6167 }
6163 6168
6164 6169 /*
6165 6170 * disable HOB so that low byte is read
6166 6171 */
6167 6172 nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
6168 6173
6169 6174 /*
6170 6175 * get the requested low bytes
6171 6176 */
6172 6177 if (flags.sata_copy_out_sec_count_lsb) {
6173 6178 scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
6174 6179 }
6175 6180
6176 6181 if (flags.sata_copy_out_lba_low_lsb) {
6177 6182 scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
6178 6183 }
6179 6184
6180 6185 if (flags.sata_copy_out_lba_mid_lsb) {
6181 6186 scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
6182 6187 }
6183 6188
6184 6189 if (flags.sata_copy_out_lba_high_lsb) {
6185 6190 scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
6186 6191 }
6187 6192
6188 6193 /*
6189 6194 * get the device register if requested
6190 6195 */
6191 6196 if (flags.sata_copy_out_device_reg) {
6192 6197 scmd->satacmd_device_reg = nv_get8(cmdhdl, nvp->nvp_drvhd);
6193 6198 }
6194 6199
6195 6200 /*
6196 6201 * get the error register if requested
6197 6202 */
6198 6203 if (flags.sata_copy_out_error_reg) {
6199 6204 scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
6200 6205 }
6201 6206 }
6202 6207
6203 6208
6204 6209 /*
6205 6210 * hot plug and remove interrupts can occur when the device is reset.
6206 6211 * Masking the interrupt doesn't always work well because if a
6207 6212 * different interrupt arrives on the other port, the driver can still
6208 6213 * end up checking the state of the other port and discover the hot
6209 6214 * interrupt flag is set even though it was masked. Also, when there are
6210 6215 * errors on the link there can be transient link events which need to be
6211 6216 * masked and eliminated as well.
6212 6217 */
6213 6218 static void
6214 6219 nv_link_event(nv_port_t *nvp, int flag)
6215 6220 {
6216 6221
6217 6222 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_link_event: flag: %s",
6218 6223 flag ? "add" : "remove");
6219 6224
6220 6225 ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
6221 6226
6222 6227 nvp->nvp_link_event_time = ddi_get_lbolt();
6223 6228
6224 6229 /*
6225 6230 * if a port has been deactivated, ignore all link events
6226 6231 */
6227 6232 if (nvp->nvp_state & NV_DEACTIVATED) {
6228 6233 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "ignoring link event"
6229 6234 " port deactivated", NULL);
6230 6235 DTRACE_PROBE(ignoring_link_port_deactivated_p);
6231 6236
6232 6237 return;
6233 6238 }
6234 6239
6235 6240 /*
6236 6241 * if the drive has been reset, ignore any transient events. If it's
6237 6242 * a real removal event, nv_monitor_reset() will handle it.
6238 6243 */
6239 6244 if (nvp->nvp_state & NV_RESET) {
6240 6245 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "ignoring link event"
6241 6246 " during reset", NULL);
6242 6247 DTRACE_PROBE(ignoring_link_event_during_reset_p);
6243 6248
6244 6249 return;
6245 6250 }
6246 6251
6247 6252 /*
6248 6253 * if link event processing is already enabled, nothing to
6249 6254 * do.
6250 6255 */
6251 6256 if (nvp->nvp_state & NV_LINK_EVENT) {
6252 6257
6253 6258 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6254 6259 "received link event while processing already in "
6255 6260 "progress", NULL);
6256 6261 DTRACE_PROBE(nv_link_event_already_set_p);
6257 6262
6258 6263 return;
6259 6264 }
6260 6265
6261 6266 DTRACE_PROBE1(link_event_p, int, nvp);
6262 6267
6263 6268 nvp->nvp_state |= NV_LINK_EVENT;
6264 6269
6265 6270 nv_setup_timeout(nvp, NV_LINK_EVENT_SETTLE);
6266 6271 }
6267 6272
6268 6273
6269 6274 /*
6270 6275 * Get request sense data and stuff it the command's sense buffer.
6271 6276 * Start a request sense command in order to get sense data to insert
6272 6277 * in the sata packet's rqsense buffer. The command completion
6273 6278 * processing is in nv_intr_pkt_pio.
6274 6279 *
6275 6280 * The sata common module provides a function to allocate and set-up a
6276 6281 * request sense packet command. The reasons it is not being used here is:
6277 6282 * a) it cannot be called in an interrupt context and this function is
6278 6283 * called in an interrupt context.
6279 6284 * b) it allocates DMA resources that are not used here because this is
6280 6285 * implemented using PIO.
6281 6286 *
6282 6287 * If, in the future, this is changed to use DMA, the sata common module
6283 6288 * should be used to allocate and set-up the error retrieval (request sense)
6284 6289 * command.
6285 6290 */
static int
nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;

	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_rqsense_pio: start", NULL);

	/* clear the local request sense buffer before starting the command */
	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);

	/* Write the request sense PACKET command */

	/* select the drive */
	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);

	/* make certain the drive selected (up to 5 seconds for DRDY) */
	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
	    NV_SEC2USEC(5), 0) == B_FALSE) {
		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_start_rqsense_pio: drive select failed", NULL);
		return (NV_FAILURE);
	}

	/* set up the command */
	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
	nv_put8(cmdhdl, nvp->nvp_sect, 0);
	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */

	/* initiate the command by writing the command register last */
	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);

	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
	NV_DELAY_NSEC(400);

	/*
	 * Wait for the device to indicate that it is ready for the command
	 * ATAPI protocol state - HP0: Check_Status_A
	 */

	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
	    4000000, 0) == B_FALSE) {
		/*
		 * distinguish a device-reported error from a timeout by
		 * inspecting the ERR/DF bits in the status register
		 */
		if (nv_get8(cmdhdl, nvp->nvp_status) &
		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_rqsense_pio: rqsense dev error (HP0)",
			    NULL);
		} else {
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_rqsense_pio: rqsense timeout (HP0)",
			    NULL);
		}

		/* fail the original packet back and reset the port */
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_complete_io(nvp, spkt, 0);
		nv_reset(nvp, "rqsense_pio");

		return (NV_FAILURE);
	}

	/*
	 * Put the ATAPI command in the data register
	 * ATAPI protocol state - HP1: Send_Packet
	 */

	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
	    (ushort_t *)nvp->nvp_data,
	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);

	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_rqsense_pio: exiting into HP3", NULL);

	return (NV_SUCCESS);
}
6369 6374
6370 6375 /*
6371 6376 * quiesce(9E) entry point.
6372 6377 *
6373 6378 * This function is called when the system is single-threaded at high
6374 6379 * PIL with preemption disabled. Therefore, this function must not be
6375 6380 * blocked.
6376 6381 *
6377 6382 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6378 6383 * DDI_FAILURE indicates an error condition and should almost never happen.
6379 6384 */
static int
nv_quiesce(dev_info_t *dip)
{
	int port, instance = ddi_get_instance(dip);
	nv_ctl_t *nvc;

	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
		return (DDI_FAILURE);

	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
		nv_port_t *nvp = &(nvc->nvc_port[port]);
		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
		uint32_t sctrl;

		/*
		 * Stop the controllers from generating interrupts.
		 * The non-blocking variant is required here: quiesce runs
		 * single-threaded at high PIL and must not take mutexes.
		 */
		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);

		/*
		 * clear signature registers
		 */
		nv_put8(cmdhdl, nvp->nvp_sect, 0);
		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
		nv_put8(cmdhdl, nvp->nvp_count, 0);

		/* mark the port as being in reset with no known device */
		nvp->nvp_signature = NV_NO_SIG;
		nvp->nvp_type = SATA_DTYPE_NONE;
		nvp->nvp_state |= NV_RESET;
		nvp->nvp_reset_time = ddi_get_lbolt();

		/*
		 * assert reset in PHY by writing a 1 to bit 0 scontrol
		 */
		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);

		nv_put32(bar5_hdl, nvp->nvp_sctrl,
		    sctrl | SCONTROL_DET_COMRESET);

		/*
		 * wait 1ms (busy-wait; drv_usecwait is safe at high PIL)
		 */
		drv_usecwait(1000);

		/*
		 * de-assert reset in PHY
		 */
		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
	}

	return (DDI_SUCCESS);
}
6434 6439
6435 6440
6436 6441 #ifdef SGPIO_SUPPORT
6437 6442 /*
6438 6443 * NVIDIA specific SGPIO LED support
6439 6444 * Please refer to the NVIDIA documentation for additional details
6440 6445 */
6441 6446
6442 6447 /*
6443 6448 * nv_sgp_led_init
6444 6449 * Detect SGPIO support. If present, initialize.
6445 6450 */
6446 6451 static void
6447 6452 nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
6448 6453 {
6449 6454 uint16_t csrp; /* SGPIO_CSRP from PCI config space */
6450 6455 uint32_t cbp; /* SGPIO_CBP from PCI config space */
6451 6456 nv_sgp_cmn_t *cmn; /* shared data structure */
6452 6457 int i;
6453 6458 char tqname[SGPIO_TQ_NAME_LEN];
6454 6459 extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
6455 6460
6456 6461 /*
6457 6462 * Initialize with appropriately invalid values in case this function
6458 6463 * exits without initializing SGPIO (for example, there is no SGPIO
6459 6464 * support).
6460 6465 */
6461 6466 nvc->nvc_sgp_csr = 0;
6462 6467 nvc->nvc_sgp_cbp = NULL;
6463 6468 nvc->nvc_sgp_cmn = NULL;
6464 6469
6465 6470 /*
6466 6471 * Only try to initialize SGPIO LED support if this property
6467 6472 * indicates it should be.
6468 6473 */
6469 6474 if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
6470 6475 "enable-sgpio-leds", 0) != 1)
6471 6476 return;
6472 6477
6473 6478 /*
6474 6479 * CK804 can pass the sgpio_detect test even though it does not support
6475 6480 * SGPIO, so don't even look at a CK804.
6476 6481 */
6477 6482 if (nvc->nvc_mcp5x_flag != B_TRUE)
6478 6483 return;
6479 6484
6480 6485 /*
6481 6486 * The NVIDIA SGPIO support can nominally handle 6 drives.
6482 6487 * However, the current implementation only supports 4 drives.
6483 6488 * With two drives per controller, that means only look at the
6484 6489 * first two controllers.
6485 6490 */
6486 6491 if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
6487 6492 return;
6488 6493
6489 6494 /* confirm that the SGPIO registers are there */
6490 6495 if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
6491 6496 NVLOG(NVDBG_INIT, nvc, NULL,
6492 6497 "SGPIO registers not detected", NULL);
6493 6498 return;
6494 6499 }
6495 6500
6496 6501 /* save off the SGPIO_CSR I/O address */
6497 6502 nvc->nvc_sgp_csr = csrp;
6498 6503
6499 6504 /* map in Control Block */
6500 6505 nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
6501 6506 sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
6502 6507
6503 6508 /* initialize the SGPIO h/w */
6504 6509 if (nv_sgp_init(nvc) == NV_FAILURE) {
6505 6510 nv_cmn_err(CE_WARN, nvc, NULL,
6506 6511 "Unable to initialize SGPIO");
6507 6512 }
6508 6513
6509 6514 /*
6510 6515 * Initialize the shared space for this instance. This could
6511 6516 * involve allocating the space, saving a pointer to the space
6512 6517 * and starting the taskq that actually turns the LEDs on and off.
6513 6518 * Or, it could involve just getting the pointer to the already
6514 6519 * allocated space.
6515 6520 */
6516 6521
6517 6522 mutex_enter(&nv_sgp_c2c_mutex);
6518 6523
6519 6524 /* try and find our CBP in the mapping table */
6520 6525 cmn = NULL;
6521 6526 for (i = 0; i < NV_MAX_CBPS; i++) {
6522 6527 if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp) {
6523 6528 cmn = nv_sgp_cbp2cmn[i].c2cm_cmn;
6524 6529 break;
6525 6530 }
6526 6531
6527 6532 if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
6528 6533 break;
6529 6534 }
6530 6535
6531 6536 if (i >= NV_MAX_CBPS) {
6532 6537 /*
6533 6538 * CBP to shared space mapping table is full
6534 6539 */
6535 6540 nvc->nvc_sgp_cmn = NULL;
6536 6541 nv_cmn_err(CE_WARN, nvc, NULL,
6537 6542 "LED handling not initialized - too many controllers");
6538 6543 } else if (cmn == NULL) {
6539 6544 /*
6540 6545 * Allocate the shared space, point the SGPIO scratch register
6541 6546 * at it and start the led update taskq.
6542 6547 */
6543 6548
6544 6549 /* allocate shared space */
6545 6550 cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
6546 6551 KM_SLEEP);
6547 6552 if (cmn == NULL) {
6548 6553 nv_cmn_err(CE_WARN, nvc, NULL,
6549 6554 "Failed to allocate shared data");
6550 6555 return;
6551 6556 }
6552 6557
6553 6558 nvc->nvc_sgp_cmn = cmn;
6554 6559
6555 6560 /* initialize the shared data structure */
6556 6561 cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
6557 6562 cmn->nvs_connected = 0;
6558 6563 cmn->nvs_activity = 0;
6559 6564 cmn->nvs_cbp = cbp;
6560 6565
6561 6566 mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
6562 6567 mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
6563 6568 cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
6564 6569
6565 6570 /* put the address in the SGPIO scratch register */
6566 6571 #if defined(__amd64)
6567 6572 nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
6568 6573 #else
6569 6574 nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
6570 6575 #endif
6571 6576
6572 6577 /* add an entry to the cbp to cmn mapping table */
6573 6578
6574 6579 /* i should be the next available table position */
6575 6580 nv_sgp_cbp2cmn[i].c2cm_cbp = cbp;
6576 6581 nv_sgp_cbp2cmn[i].c2cm_cmn = cmn;
6577 6582
6578 6583 /* start the activity LED taskq */
6579 6584
6580 6585 /*
6581 6586 * The taskq name should be unique and the time
6582 6587 */
6583 6588 (void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
6584 6589 "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
6585 6590 cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
6586 6591 TASKQ_DEFAULTPRI, 0);
6587 6592 if (cmn->nvs_taskq == NULL) {
6588 6593 cmn->nvs_taskq_delay = 0;
6589 6594 nv_cmn_err(CE_WARN, nvc, NULL,
6590 6595 "Failed to start activity LED taskq");
6591 6596 } else {
6592 6597 cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
6593 6598 (void) ddi_taskq_dispatch(cmn->nvs_taskq,
6594 6599 nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
6595 6600 }
6596 6601 } else {
6597 6602 nvc->nvc_sgp_cmn = cmn;
6598 6603 cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6599 6604 }
6600 6605
6601 6606 mutex_exit(&nv_sgp_c2c_mutex);
6602 6607 }
6603 6608
6604 6609 /*
6605 6610 * nv_sgp_detect
6606 6611 * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
6607 6612 * report back whether both were readable.
6608 6613 */
6609 6614 static int
6610 6615 nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
6611 6616 uint32_t *cbpp)
6612 6617 {
6613 6618 /* get the SGPIO_CSRP */
6614 6619 *csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
6615 6620 if (*csrpp == 0) {
6616 6621 return (NV_FAILURE);
6617 6622 }
6618 6623
6619 6624 /* SGPIO_CSRP is good, get the SGPIO_CBP */
6620 6625 *cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
6621 6626 if (*cbpp == 0) {
6622 6627 return (NV_FAILURE);
6623 6628 }
6624 6629
6625 6630 /* SGPIO_CBP is good, so we must support SGPIO */
6626 6631 return (NV_SUCCESS);
6627 6632 }
6628 6633
6629 6634 /*
6630 6635 * nv_sgp_init
6631 6636 * Initialize SGPIO.
6632 6637 * The initialization process is described by NVIDIA, but the hardware does
6633 6638 * not always behave as documented, so several steps have been changed and/or
6634 6639 * omitted.
6635 6640 */
static int
nv_sgp_init(nv_ctl_t *nvc)
{
	int seq;
	int rval = NV_SUCCESS;
	hrtime_t start, end;
	uint32_t cmd;
	uint32_t status;
	int drive_count;

	status = nv_sgp_csr_read(nvc);
	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
		/* SGPIO logic is in reset state and requires initialization */

		/* noting the Sequence field value */
		seq = SGPIO_CSR_SEQ(status);

		/* issue SGPIO_CMD_READ_PARAMS command */
		cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
		nv_sgp_csr_write(nvc, cmd);

		DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);

		/* poll for command completion (bounded by NV_SGP_CMD_TIMEOUT) */
		start = gethrtime();
		end = start + NV_SGP_CMD_TIMEOUT;
		for (;;) {
			status = nv_sgp_csr_read(nvc);

			/* break on error */
			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) {
				NVLOG(NVDBG_VERBOSE, nvc, NULL,
				    "Command error during initialization",
				    NULL);
				rval = NV_FAILURE;
				break;
			}

			/* command processing is taking place */
			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) {
				/*
				 * sequence number should advance with each
				 * accepted command; a mismatch is logged
				 * but is not treated as fatal
				 */
				if (SGPIO_CSR_SEQ(status) != seq) {
					NVLOG(NVDBG_VERBOSE, nvc, NULL,
					    "Sequence number change error",
					    NULL);
				}

				break;
			}

			/* if completion not detected in 2000ms ... */

			if (gethrtime() > end)
				break;

			/* wait 400 ns before checking again */
			NV_DELAY_NSEC(400);
		}
	}

	if (rval == NV_FAILURE)
		return (rval);

	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
		NVLOG(NVDBG_VERBOSE, nvc, NULL,
		    "SGPIO logic not operational after init - state %d",
		    SGPIO_CSR_SSTAT(status));
		/*
		 * Should return (NV_FAILURE) but the hardware can be
		 * operational even if the SGPIO Status does not indicate
		 * this.
		 */
	}

	/*
	 * NVIDIA recommends reading the supported drive count even
	 * though they also indicate that it is always 4 at this time.
	 */
	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
	if (drive_count != SGPIO_DRV_CNT_VALUE) {
		/* unexpected count is logged but, again, not fatal */
		NVLOG(NVDBG_INIT, nvc, NULL,
		    "SGPIO reported undocumented drive count - %d",
		    drive_count);
	}

	NVLOG(NVDBG_INIT, nvc, NULL,
	    "initialized ctlr: %d csr: 0x%08x",
	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr);

	return (rval);
}
6726 6731
6727 6732 static int
6728 6733 nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6729 6734 {
6730 6735 nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6731 6736
6732 6737 if (cmn == NULL)
6733 6738 return (NV_FAILURE);
6734 6739
6735 6740 mutex_enter(&cmn->nvs_slock);
6736 6741 cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6737 6742 mutex_exit(&cmn->nvs_slock);
6738 6743
6739 6744 return (NV_SUCCESS);
6740 6745 }
6741 6746
6742 6747 /*
6743 6748 * nv_sgp_csr_read
6744 6749 * This is just a 32-bit port read from the value that was obtained from the
6745 6750 * PCI config space.
6746 6751 *
6747 6752 * XXX It was advised to use the in[bwl] function for this, even though they
6748 6753 * are obsolete interfaces.
6749 6754 */
6750 6755 static int
6751 6756 nv_sgp_csr_read(nv_ctl_t *nvc)
6752 6757 {
6753 6758 return (inl(nvc->nvc_sgp_csr));
6754 6759 }
6755 6760
/*
 * nv_sgp_csr_write
 * This is just a 32-bit I/O port write. The port number was obtained from
 * the PCI config space.  Counterpart to nv_sgp_csr_read(); used to issue
 * SGPIO commands (see nv_sgp_write_data()).
 *
 * XXX It was advised to use the out[bwl] function for this, even though they
 * are obsolete interfaces.
 */
static void
nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
{
	outl(nvc->nvc_sgp_csr, val);
}
6769 6774
6770 6775 /*
6771 6776 * nv_sgp_write_data
6772 6777 * Cause SGPIO to send Control Block data
6773 6778 */
6774 6779 static int
6775 6780 nv_sgp_write_data(nv_ctl_t *nvc)
6776 6781 {
6777 6782 hrtime_t start, end;
6778 6783 uint32_t status;
6779 6784 uint32_t cmd;
6780 6785
6781 6786 /* issue command */
6782 6787 cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6783 6788 nv_sgp_csr_write(nvc, cmd);
6784 6789
6785 6790 /* poll for completion */
6786 6791 start = gethrtime();
6787 6792 end = start + NV_SGP_CMD_TIMEOUT;
6788 6793 for (;;) {
6789 6794 status = nv_sgp_csr_read(nvc);
6790 6795
6791 6796 /* break on error completion */
6792 6797 if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6793 6798 break;
6794 6799
6795 6800 /* break on successful completion */
6796 6801 if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6797 6802 break;
6798 6803
6799 6804 /* Wait 400 ns and try again */
6800 6805 NV_DELAY_NSEC(400);
6801 6806
6802 6807 if (gethrtime() > end)
6803 6808 break;
6804 6809 }
6805 6810
6806 6811 if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6807 6812 return (NV_SUCCESS);
6808 6813
6809 6814 return (NV_FAILURE);
6810 6815 }
6811 6816
/*
 * nv_sgp_activity_led_ctl
 * This is run as a taskq.  It wakes up at a fixed interval and checks to
 * see if any of the activity LEDs need to be changed.
 *
 * Per pass, for each drive: a disconnected drive has its ACTIVE field
 * forced off; an idle connected drive has it forced on (steady LED); a
 * drive flagged active has the field toggled, so repeated activity makes
 * the LED blink at the loop rate.  The loop exits when nvs_taskq_delay
 * reaches 0, which nv_sgp_cleanup() uses to shut the taskq down.
 */
static void
nv_sgp_activity_led_ctl(void *arg)
{
	nv_ctl_t *nvc = (nv_ctl_t *)arg;
	nv_sgp_cmn_t *cmn;		/* state shared among controllers */
	volatile nv_sgp_cb_t *cbp;	/* mapped SGPIO Control Block */
	clock_t ticks;			/* sleep interval; 0 ends the loop */
	uint8_t drv_leds;		/* one drive's LED field bits */
	uint32_t old_leds;		/* snapshot of all drives' LEDs */
	uint32_t new_led_state;		/* accumulated replacement state */
	int i;

	cmn = nvc->nvc_sgp_cmn;
	cbp = nvc->nvc_sgp_cbp;

	do {
		/*
		 * save off the old state of all of the LEDs
		 * NOTE(review): read without nvs_slock; presumably benign
		 * because this taskq is the only writer of the ACTIVE
		 * fields — confirm against nv_sgp_locate()/nv_sgp_error().
		 */
		old_leds = cbp->sgpio0_tr;

		DTRACE_PROBE3(sgpio__activity__state,
		    int, cmn->nvs_connected, int, cmn->nvs_activity,
		    int, old_leds);

		new_led_state = 0;

		/* for each drive */
		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {

			/* get the current state of the LEDs for the drive */
			drv_leds = SGPIO0_TR_DRV(old_leds, i);

			if ((cmn->nvs_connected & (1 << i)) == 0) {
				/* if not connected, turn off activity */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);

				continue;
			}

			if ((cmn->nvs_activity & (1 << i)) == 0) {
				/* connected, but not active */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);

				continue;
			}

			/* connected and active: toggle for the blink effect */
			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
				/* was enabled, so disable */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |=
				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);
			} else {
				/* was disabled, so enable */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);
			}

			/*
			 * clear the activity bit
			 * if there is drive activity again within the
			 * loop interval (now 1/16 second), nvs_activity
			 * will be reset and the "connected and active"
			 * condition above will cause the LED to blink
			 * off and on at the loop interval rate.  The
			 * rate may be increased (interval shortened) as
			 * long as it is not more than 1/30 second.
			 */
			mutex_enter(&cmn->nvs_slock);
			cmn->nvs_activity &= ~(1 << i);
			mutex_exit(&cmn->nvs_slock);
		}

		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);

		/* write out LED values */

		mutex_enter(&cmn->nvs_slock);
		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
		cbp->sgpio0_tr |= new_led_state;
		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
		mutex_exit(&cmn->nvs_slock);

		/* push the Control Block out to the hardware */
		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
			NVLOG(NVDBG_VERBOSE, nvc, NULL,
			    "nv_sgp_write_data failure updating active LED",
			    NULL);
		}

		/* now rest for the interval */
		mutex_enter(&cmn->nvs_tlock);
		ticks = drv_usectohz(cmn->nvs_taskq_delay);
		if (ticks > 0)
			(void) cv_reltimedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
			    ticks, TR_CLOCK_TICK);
		mutex_exit(&cmn->nvs_tlock);
	} while (ticks > 0);
}
6932 6937
6933 6938 /*
6934 6939 * nv_sgp_drive_connect
6935 6940 * Set the flag used to indicate that the drive is attached to the HBA.
6936 6941 * Used to let the taskq know that it should turn the Activity LED on.
6937 6942 */
6938 6943 static void
6939 6944 nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6940 6945 {
6941 6946 nv_sgp_cmn_t *cmn;
6942 6947
6943 6948 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6944 6949 return;
6945 6950 cmn = nvc->nvc_sgp_cmn;
6946 6951
6947 6952 mutex_enter(&cmn->nvs_slock);
6948 6953 cmn->nvs_connected |= (1 << drive);
6949 6954 mutex_exit(&cmn->nvs_slock);
6950 6955 }
6951 6956
6952 6957 /*
6953 6958 * nv_sgp_drive_disconnect
6954 6959 * Clears the flag used to indicate that the drive is no longer attached
6955 6960 * to the HBA. Used to let the taskq know that it should turn the
6956 6961 * Activity LED off. The flag that indicates that the drive is in use is
6957 6962 * also cleared.
6958 6963 */
6959 6964 static void
6960 6965 nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6961 6966 {
6962 6967 nv_sgp_cmn_t *cmn;
6963 6968
6964 6969 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6965 6970 return;
6966 6971 cmn = nvc->nvc_sgp_cmn;
6967 6972
6968 6973 mutex_enter(&cmn->nvs_slock);
6969 6974 cmn->nvs_connected &= ~(1 << drive);
6970 6975 cmn->nvs_activity &= ~(1 << drive);
6971 6976 mutex_exit(&cmn->nvs_slock);
6972 6977 }
6973 6978
6974 6979 /*
6975 6980 * nv_sgp_drive_active
6976 6981 * Sets the flag used to indicate that the drive has been accessed and the
6977 6982 * LED should be flicked off, then on. It is cleared at a fixed time
6978 6983 * interval by the LED taskq and set by the sata command start.
6979 6984 */
6980 6985 static void
6981 6986 nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6982 6987 {
6983 6988 nv_sgp_cmn_t *cmn;
6984 6989
6985 6990 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6986 6991 return;
6987 6992 cmn = nvc->nvc_sgp_cmn;
6988 6993
6989 6994 DTRACE_PROBE1(sgpio__active, int, drive);
6990 6995
6991 6996 mutex_enter(&cmn->nvs_slock);
6992 6997 cmn->nvs_activity |= (1 << drive);
6993 6998 mutex_exit(&cmn->nvs_slock);
6994 6999 }
6995 7000
6996 7001
6997 7002 /*
6998 7003 * nv_sgp_locate
6999 7004 * Turns the Locate/OK2RM LED off or on for a particular drive. State is
7000 7005 * maintained in the SGPIO Control Block.
7001 7006 */
7002 7007 static void
7003 7008 nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
7004 7009 {
7005 7010 uint8_t leds;
7006 7011 volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7007 7012 nv_sgp_cmn_t *cmn;
7008 7013
7009 7014 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7010 7015 return;
7011 7016 cmn = nvc->nvc_sgp_cmn;
7012 7017
7013 7018 if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7014 7019 return;
7015 7020
7016 7021 DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
7017 7022
7018 7023 mutex_enter(&cmn->nvs_slock);
7019 7024
7020 7025 leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7021 7026
7022 7027 leds &= ~TR_LOCATE_MASK;
7023 7028 leds |= TR_LOCATE_SET(value);
7024 7029
7025 7030 cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7026 7031 cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7027 7032
7028 7033 cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7029 7034
7030 7035 mutex_exit(&cmn->nvs_slock);
7031 7036
7032 7037 if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7033 7038 nv_cmn_err(CE_WARN, nvc, NULL,
7034 7039 "nv_sgp_write_data failure updating OK2RM/Locate LED");
7035 7040 }
7036 7041 }
7037 7042
7038 7043 /*
7039 7044 * nv_sgp_error
7040 7045 * Turns the Error/Failure LED off or on for a particular drive. State is
7041 7046 * maintained in the SGPIO Control Block.
7042 7047 */
7043 7048 static void
7044 7049 nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
7045 7050 {
7046 7051 uint8_t leds;
7047 7052 volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7048 7053 nv_sgp_cmn_t *cmn;
7049 7054
7050 7055 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7051 7056 return;
7052 7057 cmn = nvc->nvc_sgp_cmn;
7053 7058
7054 7059 if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7055 7060 return;
7056 7061
7057 7062 DTRACE_PROBE2(sgpio__error, int, drive, int, value);
7058 7063
7059 7064 mutex_enter(&cmn->nvs_slock);
7060 7065
7061 7066 leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7062 7067
7063 7068 leds &= ~TR_ERROR_MASK;
7064 7069 leds |= TR_ERROR_SET(value);
7065 7070
7066 7071 cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7067 7072 cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7068 7073
7069 7074 cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7070 7075
7071 7076 mutex_exit(&cmn->nvs_slock);
7072 7077
7073 7078 if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7074 7079 nv_cmn_err(CE_WARN, nvc, NULL,
7075 7080 "nv_sgp_write_data failure updating Fail/Error LED");
7076 7081 }
7077 7082 }
7078 7083
7079 7084 static void
7080 7085 nv_sgp_cleanup(nv_ctl_t *nvc)
7081 7086 {
7082 7087 int drive, i;
7083 7088 uint8_t drv_leds;
7084 7089 uint32_t led_state;
7085 7090 volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7086 7091 nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
7087 7092 extern void psm_unmap_phys(caddr_t, size_t);
7088 7093
7089 7094 /*
7090 7095 * If the SGPIO Control Block isn't mapped or the shared data
7091 7096 * structure isn't present in this instance, there isn't much that
7092 7097 * can be cleaned up.
7093 7098 */
7094 7099 if ((cb == NULL) || (cmn == NULL))
7095 7100 return;
7096 7101
7097 7102 /* turn off activity LEDs for this controller */
7098 7103 drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
7099 7104
7100 7105 /* get the existing LED state */
7101 7106 led_state = cb->sgpio0_tr;
7102 7107
7103 7108 /* turn off port 0 */
7104 7109 drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
7105 7110 led_state &= SGPIO0_TR_DRV_CLR(drive);
7106 7111 led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7107 7112
7108 7113 /* turn off port 1 */
7109 7114 drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
7110 7115 led_state &= SGPIO0_TR_DRV_CLR(drive);
7111 7116 led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7112 7117
7113 7118 /* set the new led state, which should turn off this ctrl's LEDs */
7114 7119 cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7115 7120 (void) nv_sgp_write_data(nvc);
7116 7121
7117 7122 /* clear the controller's in use bit */
7118 7123 mutex_enter(&cmn->nvs_slock);
7119 7124 cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
7120 7125 mutex_exit(&cmn->nvs_slock);
7121 7126
7122 7127 if (cmn->nvs_in_use == 0) {
7123 7128 /* if all "in use" bits cleared, take everything down */
7124 7129
7125 7130 if (cmn->nvs_taskq != NULL) {
7126 7131 /* allow activity taskq to exit */
7127 7132 cmn->nvs_taskq_delay = 0;
7128 7133 cv_broadcast(&cmn->nvs_cv);
7129 7134
7130 7135 /* then destroy it */
7131 7136 ddi_taskq_destroy(cmn->nvs_taskq);
7132 7137 }
7133 7138
7134 7139 /* turn off all of the LEDs */
7135 7140 cb->sgpio0_tr = 0;
7136 7141 cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7137 7142 (void) nv_sgp_write_data(nvc);
7138 7143
7139 7144 cb->sgpio_sr = NULL;
7140 7145
7141 7146 /* zero out the CBP to cmn mapping */
7142 7147 for (i = 0; i < NV_MAX_CBPS; i++) {
7143 7148 if (nv_sgp_cbp2cmn[i].c2cm_cbp == cmn->nvs_cbp) {
7144 7149 nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
7145 7150 break;
7146 7151 }
7147 7152
7148 7153 if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
7149 7154 break;
7150 7155 }
7151 7156
7152 7157 /* free resources */
7153 7158 cv_destroy(&cmn->nvs_cv);
7154 7159 mutex_destroy(&cmn->nvs_tlock);
7155 7160 mutex_destroy(&cmn->nvs_slock);
7156 7161
7157 7162 kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
7158 7163 }
7159 7164
7160 7165 nvc->nvc_sgp_cmn = NULL;
7161 7166
7162 7167 /* unmap the SGPIO Control Block */
7163 7168 psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
7164 7169 }
7165 7170 #endif /* SGPIO_SUPPORT */
↓ open down ↓ |
4453 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX