@@ -28,7 +28,9 @@
  * Modified 28 September 2010 by Mark Sproul
  * Modified 14 February 2016 by Andreas Hardtung (added tx buffer)
  * Modified 01 October 2017 by Eduardo José Tagle (added XON/XOFF)
+ * Modified 10 June 2018 by Eduardo José Tagle (See #10991)
  */
+
 #ifdef __AVR__
 
 // Disable HardwareSerial.cpp to support chips without a UART (Attiny, etc.)
@@ -91,6 +93,70 @@
 #include "../../feature/emergency_parser.h"
 #endif
 
+// "Atomically" read the RX head index value without disabling interrupts:
+// This MUST be called with RX interrupts enabled, and CAN'T be called
+// from the RX ISR itself!
+FORCE_INLINE ring_buffer_pos_t atomic_read_rx_head() {
+  #if RX_BUFFER_SIZE > 256
+    // Keep reading until 2 consecutive reads return the same value,
+    // meaning there was no update in-between caused by an interrupt.
+    // This works because serial RX interrupts happen at a slower rate
+    // than successive reads of a variable, so 2 consecutive reads with
+    // the same value means no interrupt updated it.
+    ring_buffer_pos_t vold, vnew = rx_buffer.head;
+    sw_barrier();
+    do {
+      vold = vnew;
+      vnew = rx_buffer.head;
+      sw_barrier();
+    } while (vold != vnew);
+    return vnew;
+  #else
+    // With an 8-bit index, reads are always atomic. No need for special handling.
+    return rx_buffer.head;
+  #endif
+}
+
+#if RX_BUFFER_SIZE > 256
+  static volatile bool rx_tail_value_not_stable = false;
+  static volatile uint16_t rx_tail_value_backup = 0;
+#endif
+
+// Set the RX tail index, taking into account that the RX ISR could interrupt
+// the write to this variable in the middle, so a backup strategy
+// is used to ensure reads return the correct value.
+// - Must NOT be called from the RX ISR -
+FORCE_INLINE void atomic_set_rx_tail(ring_buffer_pos_t value) {
+  #if RX_BUFFER_SIZE > 256
+    // Store the new value in the backup
+    rx_tail_value_backup = value;
+    sw_barrier();
+    // Flag that we are about to change the true value
+    rx_tail_value_not_stable = true;
+    sw_barrier();
+    // Store the new value
+    rx_buffer.tail = value;
+    sw_barrier();
+    // Signal that the new value is completely stored
+    rx_tail_value_not_stable = false;
+    sw_barrier();
+  #else
+    rx_buffer.tail = value;
+  #endif
+}
+
+// Get the RX tail index, taking into account that the read could be
+// interrupting the update of that index value in the middle.
+// - Called from the RX ISR -
+FORCE_INLINE ring_buffer_pos_t atomic_read_rx_tail() {
+  #if RX_BUFFER_SIZE > 256
+    // If the true index is being modified, return the backup value
+    if (rx_tail_value_not_stable) return rx_tail_value_backup;
+  #endif
+  // The true index is stable, return it
+  return rx_buffer.tail;
+}
+
 // (called with RX interrupts disabled)
 FORCE_INLINE void store_rxd_char() {
 
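Note on the double-read technique above: on 8-bit AVR a 16-bit load compiles to two single-byte loads, so an RX interrupt can strike between the two halves and hand the main thread a "torn" index. The retry loop detects exactly that case. Below is a minimal standalone sketch of the pattern, assuming `sw_barrier()` is a compiler-only memory barrier (it is presumably defined elsewhere in this file as an empty `asm volatile` with a `"memory"` clobber); the names here are illustrative, not taken from the patch:

```cpp
#include <stdint.h>

// Assumption: a compiler-only fence. Emits no instructions, but forbids the
// compiler from caching or reordering memory accesses across this point.
#define sw_barrier() __asm__ __volatile__("" ::: "memory")

typedef uint16_t ring_buffer_pos_t;       // 16-bit: loads can tear on 8-bit AVR

static volatile ring_buffer_pos_t head;   // written only by the RX ISR

// Double-read: loop until two consecutive reads agree. A torn value can only
// be observed if the ISR stored to 'head' between the two byte loads, and
// back-to-back reads are far faster than the interrupt rate, so two equal
// reads prove no update (and therefore no tearing) happened in between.
static ring_buffer_pos_t atomic_read_head() {
  ring_buffer_pos_t vold, vnew = head;
  sw_barrier();
  do {
    vold = vnew;
    vnew = head;
    sw_barrier();
  } while (vold != vnew);
  return vnew;
}
```

In practice the loop rarely runs more than twice: at any realistic baud rate, two RX interrupts cannot fire within a handful of cycles of each other.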
@@ -98,10 +164,12 @@
     static EmergencyParser::State emergency_state; // = EP_RESET
   #endif
 
-  // Get the tail - Nothing can alter its value while we are at this ISR
-  const ring_buffer_pos_t t = rx_buffer.tail;
+  // Get the tail - Nothing can alter its value while this ISR is executing, but there's
+  // a chance that this ISR interrupted the main process while it was updating the index.
+  // The backup mechanism ensures the correct value is always returned.
+  const ring_buffer_pos_t t = atomic_read_rx_tail();
 
-  // Get the head pointer
+  // Get the head pointer - This ISR is the only one that modifies its value, so it's safe to read here
   ring_buffer_pos_t h = rx_buffer.head;
 
   // Get the next element
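For reference, here is why `atomic_read_rx_tail()` can never observe a half-written index, laid out as the writer's timeline. The step sequence is exactly `atomic_set_rx_tail()` from the hunk above; the interleaving analysis is editorial:

```cpp
// atomic_set_rx_tail(value) as seen by an RX ISR firing at each point:
//
//   rx_tail_value_backup = value;      // ISR here: flag is still false, so it
//   sw_barrier();                      //   reads rx_buffer.tail - the old,
//                                      //   fully-written value. Safe.
//   rx_tail_value_not_stable = true;   // ISR here: flag is true, so it reads
//   sw_barrier();                      //   the backup, which was completely
//                                      //   written before the flag. Safe.
//   rx_buffer.tail = value;            // ISR during this (possibly torn)
//   sw_barrier();                      //   two-byte store: flag is still
//                                      //   true, so the backup is used. Safe.
//   rx_tail_value_not_stable = false;  // ISR here: flag is false again, and
//   sw_barrier();                      //   rx_buffer.tail is complete. Safe.
```

Two details make this sound: the flag is a single byte, so its own reads and writes are inherently atomic, and there is only one writer (the main thread, per the "Must NOT be called from the RX ISR" rule), so nothing can move the backup while the flag is raised.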
@@ -158,7 +226,7 @@
   // and stop sending bytes. This translates to 13 ms of propagation time.
   if (rx_count >= (RX_BUFFER_SIZE) / 8) {
 
-    // At this point, definitely no TX interrupt was executing, since the TX isr can't be preempted.
+    // At this point, definitely no TX interrupt was executing, since the TX ISR can't be preempted.
     // Don't enable the TX interrupt here as a means to trigger the XOFF char, because if it happens
     // to be in the middle of trying to disable the RX interrupt in the main program, eventually the
     // enabling of the TX interrupt could be undone. The ONLY reliable thing this can do to ensure
@@ -246,7 +314,7 @@
     }
   #endif // SERIAL_XON_XOFF
 
-  // Store the new head value
+  // Store the new head value - The main loop will retry until the value is stable
   rx_buffer.head = h;
 }
 
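This plain store of `h` is the producer half of the double-read scheme sketched earlier: the ISR publishes the head with an ordinary (and on AVR possibly torn) 16-bit write, paying no extra cost in the interrupt path, while every main-thread reader goes through `atomic_read_rx_head()`, whose retry loop simply re-reads until two consecutive samples agree.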
@@ -356,37 +424,14 @@
 }
 
 int MarlinSerial::peek(void) {
-  #if RX_BUFFER_SIZE > 256
-    // Disable RX interrupts, but only if reads are non-atomic
-    const bool isr_enabled = TEST(M_UCSRxB, M_RXCIEx);
-    CBI(M_UCSRxB, M_RXCIEx);
-  #endif
-
-  const int v = rx_buffer.head == rx_buffer.tail ? -1 : rx_buffer.buffer[rx_buffer.tail];
-
-  #if RX_BUFFER_SIZE > 256
-    // Re-enable RX interrupts if they were enabled
-    if (isr_enabled) SBI(M_UCSRxB, M_RXCIEx);
-  #endif
-  return v;
+  const ring_buffer_pos_t h = atomic_read_rx_head(), t = rx_buffer.tail;
+  return h == t ? -1 : rx_buffer.buffer[t];
 }
 
 int MarlinSerial::read(void) {
+  const ring_buffer_pos_t h = atomic_read_rx_head();
 
-  #if RX_BUFFER_SIZE > 256
-    // Disable RX interrupts to ensure atomic reads - This could re-enable TX interrupts,
-    // but that situation is explicitly handled at the TX ISR, so no problems there
-    bool isr_enabled = TEST(M_UCSRxB, M_RXCIEx);
-    CBI(M_UCSRxB, M_RXCIEx);
-  #endif
-
-  const ring_buffer_pos_t h = rx_buffer.head;
-
-  #if RX_BUFFER_SIZE > 256
-    // End critical section
-    if (isr_enabled) SBI(M_UCSRxB, M_RXCIEx);
-  #endif
-
+  // Read the tail. The main thread owns it, so it is safe to read directly.
   ring_buffer_pos_t t = rx_buffer.tail;
 
   // If nothing to read, return now
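The rewritten `peek()` and `read()` follow directly from the ownership split between the two contexts, summarized below (an editorial summary, not code from the patch):

```cpp
// Single-producer / single-consumer roles for the RX ring indices:
//
//   index   sole writer                  cross-context reader
//   -----   --------------------------   ----------------------------------
//   head    RX ISR (plain store)         main thread: atomic_read_rx_head()
//   tail    main thread:                 RX ISR: atomic_read_rx_tail()
//           atomic_set_rx_tail()
//
// Each index has exactly one writer, so that writer may also read its own
// index directly; only reads from the other context need a helper. That is
// why read() and peek() use rx_buffer.tail bare, but never rx_buffer.head.
```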
@@ -396,22 +441,9 @@
   const int v = rx_buffer.buffer[t];
   t = (ring_buffer_pos_t)(t + 1) & (RX_BUFFER_SIZE - 1);
 
-  #if RX_BUFFER_SIZE > 256
-    // Disable RX interrupts to ensure an atomic write to the tail, so
-    // the RX ISR can't read partially updated values - This could
-    // re-enable TX interrupts, but that situation is explicitly
-    // handled at the TX ISR, so no problems there
-    isr_enabled = TEST(M_UCSRxB, M_RXCIEx);
-    CBI(M_UCSRxB, M_RXCIEx);
-  #endif
-
-  // Advance tail
-  rx_buffer.tail = t;
-
-  #if RX_BUFFER_SIZE > 256
-    // End critical section
-    if (isr_enabled) SBI(M_UCSRxB, M_RXCIEx);
-  #endif
+  // Advance the tail - Making sure the RX ISR will always get a stable value, even
+  // if it interrupts the writing of the value of that variable in the middle.
+  atomic_set_rx_tail(t);
 
   #if ENABLED(SERIAL_XON_XOFF)
     // If the XOFF char was sent, or about to be sent...
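Worth noting: after this change nothing in `peek()`, `read()`, `available()` or `flush()` masks interrupts anymore. The RX ISR can preempt any of them at any point, so character reception latency no longer depends on how long the main loop spends parsing.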
@@ -422,7 +454,7 @@
       #if TX_BUFFER_SIZE > 0
         // Signal that we want an XON character to be sent.
         xon_xoff_state = XON_CHAR;
-        // Enable TX isr. Non atomic, but it will eventually enable them
+        // Enable the TX ISR. Non-atomic, but the interrupt will eventually be enabled.
         SBI(M_UCSRxB, M_UDRIEx);
       #else
         // If not using TX interrupts, we must send the XON char now
@@ -438,31 +470,17 @@
 }
 
 ring_buffer_pos_t MarlinSerial::available(void) {
-  #if RX_BUFFER_SIZE > 256
-    const bool isr_enabled = TEST(M_UCSRxB, M_RXCIEx);
-    CBI(M_UCSRxB, M_RXCIEx);
-  #endif
-
-  const ring_buffer_pos_t h = rx_buffer.head, t = rx_buffer.tail;
-
-  #if RX_BUFFER_SIZE > 256
-    if (isr_enabled) SBI(M_UCSRxB, M_RXCIEx);
-  #endif
-
+  const ring_buffer_pos_t h = atomic_read_rx_head(), t = rx_buffer.tail;
   return (ring_buffer_pos_t)(RX_BUFFER_SIZE + h - t) & (RX_BUFFER_SIZE - 1);
 }
 
 void MarlinSerial::flush(void) {
-  #if RX_BUFFER_SIZE > 256
-    const bool isr_enabled = TEST(M_UCSRxB, M_RXCIEx);
-    CBI(M_UCSRxB, M_RXCIEx);
-  #endif
-
-  rx_buffer.tail = rx_buffer.head;
-
-  #if RX_BUFFER_SIZE > 256
-    if (isr_enabled) SBI(M_UCSRxB, M_RXCIEx);
-  #endif
+  // Set the tail to the head:
+  // - Read the RX head index in a safe way. (See atomic_read_rx_head.)
+  // - Set the tail, making sure the RX ISR will always get a stable value, even
+  //   if it interrupts the writing of the value of that variable in the middle.
+  atomic_set_rx_tail(atomic_read_rx_head());
 
   #if ENABLED(SERIAL_XON_XOFF)
     // If the XOFF char was sent, or about to be sent...
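`available()` and `flush()` now follow the same pattern: one safe snapshot of the head plus a direct read of the main-thread-owned tail replaces the old RX-interrupt-masking critical sections. Since the ISR only ever adds bytes, the count `available()` returns is a lower bound and always safe to act on. A hypothetical polling caller (the instance name is illustrative; Marlin's global serial object may be named differently):

```cpp
// Drain pending bytes. available() can only undercount (the ISR may append
// bytes after the snapshot), never overcount, so no interrupt masking needed.
while (customizedSerial.available() > 0) {
  const int c = customizedSerial.read();
  if (c < 0) break;   // cannot happen with a single consumer; purely defensive
  process(c);         // process() is a placeholder for the caller's logic
}
```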
@@ -470,7 +488,7 @@
     #if TX_BUFFER_SIZE > 0
       // Signal that we want an XON character to be sent.
       xon_xoff_state = XON_CHAR;
-      // Enable TX isr. Non atomic, but it will eventually enable it.
+      // Enable the TX ISR. Non-atomic, but the interrupt will eventually be enabled.
       SBI(M_UCSRxB, M_UDRIEx);
     #else
       // If not using TX interrupts, we must send the XON char now
@@ -492,7 +510,7 @@
   // effective datarate at high (>500kbit/s) bitrates, where
   // interrupt overhead becomes a slowdown.
   // Yes, there is a race condition with the sending of the
-  // XOFF char at the RX isr, but it is properly handled there
+  // XOFF char in the RX ISR, but it is properly handled there.
   if (!TEST(M_UCSRxB, M_UDRIEx) && TEST(M_UCSRxA, M_UDREx)) {
     M_UDRx = c;
 
@@ -527,7 +545,7 @@
   tx_buffer.buffer[tx_buffer.head] = c;
   tx_buffer.head = i;
 
-  // Enable TX isr - Non atomic, but it will eventually enable TX isr
+  // Enable the TX ISR - Non-atomic, but the TX ISR will eventually be enabled
   SBI(M_UCSRxB, M_UDRIEx);
 }
 
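"Non-atomic" here is worth spelling out. On the AVRs Marlin targets, UCSRnB lives outside the address range the single-cycle `sbi` instruction can reach, so the `SBI()` macro presumably compiles to a read-modify-write sequence (a sketch of the likely codegen, not taken from the patch):

```cpp
// SBI(M_UCSRxB, M_UDRIEx) as load / or / store:
//   lds  r24, UCSRnB       ; read the register
//   ori  r24, _BV(UDRIEn)  ; set the TX-interrupt-enable bit
//   sts  UCSRnB, r24       ; write back - an ISR that modified UCSRnB in
//                          ; between is silently overwritten
// A lost enable is tolerable here: every queued byte sets the bit again,
// so the TX ISR is "eventually" enabled, exactly as the comment says.
```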