@@ -29,6 +29,7 @@
  * Modified 14 February 2016 by Andreas Hardtung (added tx buffer)
  * Modified 01 October 2017 by Eduardo José Tagle (added XON/XOFF)
  * Modified 10 June 2018 by Eduardo José Tagle (See #10991)
+ * Templatized 01 October 2018 by Eduardo José Tagle to allow multiple instances
  */

 #ifdef __AVR__
@@ -42,62 +43,26 @@
 #include "MarlinSerial.h"
 #include "../../Marlin.h"

-struct ring_buffer_r {
-  unsigned char buffer[RX_BUFFER_SIZE];
-  volatile ring_buffer_pos_t head, tail;
-};
-
-#if TX_BUFFER_SIZE > 0
-  struct ring_buffer_t {
-    unsigned char buffer[TX_BUFFER_SIZE];
-    volatile uint8_t head, tail;
-  };
-#endif
-
-#if UART_PRESENT(SERIAL_PORT)
-  ring_buffer_r rx_buffer = { { 0 }, 0, 0 };
-  #if TX_BUFFER_SIZE > 0
-    ring_buffer_t tx_buffer = { { 0 }, 0, 0 };
-  #endif
-  static bool _written;
-#endif
-
-#if ENABLED(SERIAL_XON_XOFF)
-  constexpr uint8_t XON_XOFF_CHAR_SENT = 0x80,  // XON / XOFF Character was sent
-                    XON_XOFF_CHAR_MASK = 0x1F;  // XON / XOFF character to send
-  // XON / XOFF character definitions
-  constexpr uint8_t XON_CHAR = 17, XOFF_CHAR = 19;
-  uint8_t xon_xoff_state = XON_XOFF_CHAR_SENT | XON_CHAR;
-#endif
-
-#if ENABLED(SERIAL_STATS_DROPPED_RX)
-  uint8_t rx_dropped_bytes = 0;
-#endif
-
-#if ENABLED(SERIAL_STATS_RX_BUFFER_OVERRUNS)
-  uint8_t rx_buffer_overruns = 0;
-#endif
-
-#if ENABLED(SERIAL_STATS_RX_FRAMING_ERRORS)
-  uint8_t rx_framing_errors = 0;
-#endif
-
-#if ENABLED(SERIAL_STATS_MAX_RX_QUEUED)
-  ring_buffer_pos_t rx_max_enqueued = 0;
-#endif
+template<typename Cfg> typename MarlinSerial<Cfg>::ring_buffer_r MarlinSerial<Cfg>::rx_buffer = { 0 };
+template<typename Cfg> typename MarlinSerial<Cfg>::ring_buffer_t MarlinSerial<Cfg>::tx_buffer = { 0 };
+template<typename Cfg> bool MarlinSerial<Cfg>::_written = false;
+template<typename Cfg> uint8_t MarlinSerial<Cfg>::xon_xoff_state = MarlinSerial<Cfg>::XON_XOFF_CHAR_SENT | MarlinSerial<Cfg>::XON_CHAR;
+template<typename Cfg> uint8_t MarlinSerial<Cfg>::rx_dropped_bytes = 0;
+template<typename Cfg> uint8_t MarlinSerial<Cfg>::rx_buffer_overruns = 0;
+template<typename Cfg> uint8_t MarlinSerial<Cfg>::rx_framing_errors = 0;
+template<typename Cfg> typename MarlinSerial<Cfg>::ring_buffer_pos_t MarlinSerial<Cfg>::rx_max_enqueued = 0;

 // A SW memory barrier, to ensure GCC does not overoptimize loops
 #define sw_barrier() asm volatile("": : :"memory");

-#if ENABLED(EMERGENCY_PARSER)
-  #include "../../feature/emergency_parser.h"
-#endif
+#include "../../feature/emergency_parser.h"

 // "Atomically" read the RX head index value without disabling interrupts:
 // This MUST be called with RX interrupts enabled, and CAN'T be called
 // from the RX ISR itself!
-FORCE_INLINE ring_buffer_pos_t atomic_read_rx_head() {
-  #if RX_BUFFER_SIZE > 256
+template<typename Cfg>
+FORCE_INLINE typename MarlinSerial<Cfg>::ring_buffer_pos_t MarlinSerial<Cfg>::atomic_read_rx_head() {
+  if (Cfg::RX_SIZE > 256) {
     // Keep reading until 2 consecutive reads return the same value,
     // meaning there was no update in-between caused by an interrupt.
     // This works because serial RX interrupts happen at a slower rate
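
The file-scope ring buffers, XON/XOFF state and statistics counters above become static members of `MarlinSerial<Cfg>`, and every `#if` feature switch turns into a compile-time `Cfg::` constant that the compiler folds away per instantiation. The real configuration type (`MarlinSerialCfg`, instantiated at the bottom of this file) lives in MarlinSerial.h and is not shown in this diff; a minimal sketch of the shape such a traits struct could take, using only the constants the hunks below actually reference, might be:

```cpp
// Illustrative sketch only -- not the actual MarlinSerialCfg from MarlinSerial.h.
struct ExampleSerialCfg {
  static constexpr int  RX_SIZE           = 128;   // RX ring size, a power of two
  static constexpr int  TX_SIZE           = 32;    // 0 disables the TX ring buffer
  static constexpr bool XONOFF            = false; // software flow control
  static constexpr bool EMERGENCYPARSER   = false; // feed received bytes to emergency_parser
  static constexpr bool DROPPED_RX        = false; // count dropped bytes
  static constexpr bool RX_OVERRUNS       = false; // count hardware overruns
  static constexpr bool RX_FRAMING_ERRORS = false; // count framing errors
  static constexpr bool MAX_RX_QUEUED     = false; // track peak RX queue depth
};
```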
@@ -111,23 +76,25 @@
       sw_barrier();
     } while (vold != vnew);
     return vnew;
-  #else
+  }
+  else {
     // With an 8bit index, reads are always atomic. No need for special handling
     return rx_buffer.head;
-  #endif
+  }
 }

-#if RX_BUFFER_SIZE > 256
-  static volatile bool rx_tail_value_not_stable = false;
-  static volatile uint16_t rx_tail_value_backup = 0;
-#endif
+template<typename Cfg>
+volatile bool MarlinSerial<Cfg>::rx_tail_value_not_stable = false;
+template<typename Cfg>
+volatile uint16_t MarlinSerial<Cfg>::rx_tail_value_backup = 0;

 // Set RX tail index, taking into account the RX ISR could interrupt
 // the write to this variable in the middle - So a backup strategy
 // is used to ensure reads of the correct values.
 // -Must NOT be called from the RX ISR -
-FORCE_INLINE void atomic_set_rx_tail(ring_buffer_pos_t value) {
-  #if RX_BUFFER_SIZE > 256
+template<typename Cfg>
+FORCE_INLINE void MarlinSerial<Cfg>::atomic_set_rx_tail(typename MarlinSerial<Cfg>::ring_buffer_pos_t value) {
+  if (Cfg::RX_SIZE > 256) {
     // Store the new value in the backup
     rx_tail_value_backup = value;
     sw_barrier();
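
The `Cfg::RX_SIZE > 256` branch above is the classic double-read idiom for a 16-bit index shared with an ISR on an 8-bit AVR: a 16-bit load takes two instructions and can be torn by an interrupt, but two consecutive reads that agree must both be coherent. A standalone sketch of the same loop (helper name assumed; `sw_barrier()` is the macro defined above):

```cpp
// Read a 16-bit value that an ISR may update at any moment, without
// disabling interrupts: retry until two consecutive reads match.
static uint16_t read_isr_shared_u16(const volatile uint16_t &v) {
  uint16_t vold, vnew = v;
  sw_barrier();
  do {
    vold = vnew;
    vnew = v;
    sw_barrier();
  } while (vold != vnew);
  return vnew;
}
```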
@@ -140,29 +107,29 @@
     // Signal the new value is completely stored into the value
     rx_tail_value_not_stable = false;
     sw_barrier();
-  #else
+  }
+  else
     rx_buffer.tail = value;
-  #endif
 }

 // Get the RX tail index, taking into account the read could be
 // interrupting in the middle of the update of that index value
 // -Called from the RX ISR -
-FORCE_INLINE ring_buffer_pos_t atomic_read_rx_tail() {
-  #if RX_BUFFER_SIZE > 256
+template<typename Cfg>
+FORCE_INLINE typename MarlinSerial<Cfg>::ring_buffer_pos_t MarlinSerial<Cfg>::atomic_read_rx_tail() {
+  if (Cfg::RX_SIZE > 256) {
     // If the true index is being modified, return the backup value
     if (rx_tail_value_not_stable) return rx_tail_value_backup;
-  #endif
+  }
   // The true index is stable, return it
   return rx_buffer.tail;
 }

 // (called with RX interrupts disabled)
-FORCE_INLINE void store_rxd_char() {
+template<typename Cfg>
+FORCE_INLINE void MarlinSerial<Cfg>::store_rxd_char() {

-  #if ENABLED(EMERGENCY_PARSER)
-    static EmergencyParser::State emergency_state; // = EP_RESET
-  #endif
+  static EmergencyParser::State emergency_state; // = EP_RESET

   // Get the tail - Nothing can alter its value while this ISR is executing, but there's
   // a chance that this ISR interrupted the main process while it was updating the index.
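
Writes go the other way: the main thread cannot store a 16-bit tail atomically, so `atomic_set_rx_tail()` publishes the value in a backup slot and raises a not-stable flag around the real store, and `atomic_read_rx_tail()` in the RX ISR falls back to the backup whenever the flag is set. A compressed sketch of that protocol with standalone (assumed) names:

```cpp
static volatile uint16_t tail, tail_backup;
static volatile bool tail_unstable;

void set_tail(uint16_t value) {   // main thread only, never from the ISR
  tail_backup = value;            // 1. publish the value in the backup slot
  sw_barrier();
  tail_unstable = true;           // 2. mark the real slot as in-flux
  sw_barrier();
  tail = value;                   // 3. the non-atomic 16-bit store
  sw_barrier();
  tail_unstable = false;          // 4. the real slot is valid again
  sw_barrier();
}

uint16_t read_tail_from_isr() {   // RX ISR side
  return tail_unstable ? tail_backup : tail;
}
```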
@@ -173,27 +140,17 @@
   ring_buffer_pos_t h = rx_buffer.head;

   // Get the next element
-  ring_buffer_pos_t i = (ring_buffer_pos_t)(h + 1) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+  ring_buffer_pos_t i = (ring_buffer_pos_t)(h + 1) & (ring_buffer_pos_t)(Cfg::RX_SIZE - 1);

-  // This must read the M_UCSRxA register before reading the received byte to detect error causes
-  #if ENABLED(SERIAL_STATS_DROPPED_RX)
-    if (TEST(M_UCSRxA, M_DORx) && !++rx_dropped_bytes) --rx_dropped_bytes;
-  #endif
-
-  #if ENABLED(SERIAL_STATS_RX_BUFFER_OVERRUNS)
-    if (TEST(M_UCSRxA, M_DORx) && !++rx_buffer_overruns) --rx_buffer_overruns;
-  #endif
-
-  #if ENABLED(SERIAL_STATS_RX_FRAMING_ERRORS)
-    if (TEST(M_UCSRxA, M_FEx) && !++rx_framing_errors) --rx_framing_errors;
-  #endif
+  // This must read the R_UCSRA register before reading the received byte to detect error causes
+  if (Cfg::DROPPED_RX && B_DOR && !++rx_dropped_bytes) --rx_dropped_bytes;
+  if (Cfg::RX_OVERRUNS && B_DOR && !++rx_buffer_overruns) --rx_buffer_overruns;
+  if (Cfg::RX_FRAMING_ERRORS && B_FE && !++rx_framing_errors) --rx_framing_errors;

   // Read the character from the USART
-  uint8_t c = M_UDRx;
+  uint8_t c = R_UDR;

-  #if ENABLED(EMERGENCY_PARSER)
-    emergency_parser.update(emergency_state, c);
-  #endif
+  if (Cfg::EMERGENCYPARSER) emergency_parser.update(emergency_state, c);

   // If the character is to be stored at the index just before the tail
   // (such that the head would advance to the current tail), the RX FIFO is
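
Two idioms are worth spelling out here: the status register (`R_UCSRA`) has to be sampled before `R_UDR`, because the AVR USART buffers the frame/overrun error flags together with the received byte and invalidates them once the data register is read; and the `!++counter) --counter` pattern makes the 8-bit statistics counters saturate at 255 instead of silently wrapping to zero:

```cpp
// Saturating 8-bit counter: when ++c wraps from 255 to 0 the condition
// fires and the decrement pins the value back at 255.
uint8_t c = 255;
if (!++c) --c;   // c is still 255
```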
@@ -202,29 +159,28 @@
     rx_buffer.buffer[h] = c;
     h = i;
   }
-  #if ENABLED(SERIAL_STATS_DROPPED_RX)
-    else if (!++rx_dropped_bytes) --rx_dropped_bytes;
-  #endif
+  else if (Cfg::DROPPED_RX && !++rx_dropped_bytes)
+    --rx_dropped_bytes;

-  #if ENABLED(SERIAL_STATS_MAX_RX_QUEUED)
+  if (Cfg::MAX_RX_QUEUED) {
     // Calculate count of bytes stored into the RX buffer
-    const ring_buffer_pos_t rx_count = (ring_buffer_pos_t)(h - t) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+    const ring_buffer_pos_t rx_count = (ring_buffer_pos_t)(h - t) & (ring_buffer_pos_t)(Cfg::RX_SIZE - 1);

     // Keep track of the maximum count of enqueued bytes
     NOLESS(rx_max_enqueued, rx_count);
-  #endif
+  }

-  #if ENABLED(SERIAL_XON_XOFF)
+  if (Cfg::XONOFF) {
     // If the last char that was sent was an XON
     if ((xon_xoff_state & XON_XOFF_CHAR_MASK) == XON_CHAR) {

       // Bytes stored into the RX buffer
-      const ring_buffer_pos_t rx_count = (ring_buffer_pos_t)(h - t) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+      const ring_buffer_pos_t rx_count = (ring_buffer_pos_t)(h - t) & (ring_buffer_pos_t)(Cfg::RX_SIZE - 1);

       // If over 12.5% of RX buffer capacity, send XOFF before running out of
       // RX buffer space .. 325 bytes @ 250kbits/s needed to let the host react
       // and stop sending bytes. This translates to 13mS propagation time.
-      if (rx_count >= (RX_BUFFER_SIZE) / 8) {
+      if (rx_count >= (Cfg::RX_SIZE) / 8) {

         // At this point, definitely no TX interrupt was executing, since the TX ISR can't be preempted.
         // Don't enable the TX interrupt here as a means to trigger the XOFF char, because if it happens
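
The flow-control thresholds are plain fractions of the ring size: for example, with Cfg::RX_SIZE = 128, XOFF goes out once 16 or more bytes (1/8 of the buffer) are queued, and read() later requests XON again when fewer than 12 bytes (1/10) remain. The 13 ms figure in the comment is just frame timing: at 250 kbit/s a 10-bit serial frame takes 40 µs, so 325 frames come to roughly 13 ms of host reaction margin.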
@@ -238,19 +194,17 @@
         // Wait until the TX register becomes empty and send it - Here there could be a problem
         // - While waiting for the TX register to empty, the RX register could receive a new
         //   character. This must also handle that situation!
-        while (!TEST(M_UCSRxA, M_UDREx)) {
+        while (!B_UDRE) {

-          if (TEST(M_UCSRxA,M_RXCx)) {
+          if (B_RXC) {
             // A char arrived while waiting for the TX buffer to be empty - Receive and process it!

-            i = (ring_buffer_pos_t)(h + 1) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+            i = (ring_buffer_pos_t)(h + 1) & (ring_buffer_pos_t)(Cfg::RX_SIZE - 1);

             // Read the character from the USART
-            c = M_UDRx;
+            c = R_UDR;

-            #if ENABLED(EMERGENCY_PARSER)
-              emergency_parser.update(emergency_state, c);
-            #endif
+            if (Cfg::EMERGENCYPARSER) emergency_parser.update(emergency_state, c);

             // If the character is to be stored at the index just before the tail
             // (such that the head would advance to the current tail), the FIFO is
@@ -259,19 +213,18 @@
               rx_buffer.buffer[h] = c;
               h = i;
             }
-            #if ENABLED(SERIAL_STATS_DROPPED_RX)
-              else if (!++rx_dropped_bytes) --rx_dropped_bytes;
-            #endif
+            else if (Cfg::DROPPED_RX && !++rx_dropped_bytes)
+              --rx_dropped_bytes;
           }
           sw_barrier();
         }

-        M_UDRx = XOFF_CHAR;
+        R_UDR = XOFF_CHAR;

         // Clear the TXC bit -- "can be cleared by writing a one to its bit
         // location". This makes sure flush() won't return until the bytes
         // actually got written
-        SBI(M_UCSRxA, M_TXCx);
+        B_TXC = 1;

         // At this point there could be a race condition between the write() function
         // and this sending of the XOFF char. This interrupt could happen between the
@@ -280,19 +233,18 @@
         // sure the write() function will succeed is to wait for the XOFF char to be
         // completely sent. Since an extra character could be received during the wait
         // it must also be handled!
-        while (!TEST(M_UCSRxA, M_UDREx)) {
+        while (!B_UDRE) {

-          if (TEST(M_UCSRxA,M_RXCx)) {
+          if (B_RXC) {
             // A char arrived while waiting for the TX buffer to be empty - Receive and process it!

-            i = (ring_buffer_pos_t)(h + 1) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+            i = (ring_buffer_pos_t)(h + 1) & (ring_buffer_pos_t)(Cfg::RX_SIZE - 1);

             // Read the character from the USART
-            c = M_UDRx;
+            c = R_UDR;

-            #if ENABLED(EMERGENCY_PARSER)
+            if (Cfg::EMERGENCYPARSER)
               emergency_parser.update(emergency_state, c);
-            #endif

             // If the character is to be stored at the index just before the tail
             // (such that the head would advance to the current tail), the FIFO is
@@ -301,9 +253,8 @@
               rx_buffer.buffer[h] = c;
               h = i;
             }
-            #if ENABLED(SERIAL_STATS_DROPPED_RX)
-              else if (!++rx_dropped_bytes) --rx_dropped_bytes;
-            #endif
+            else if (Cfg::DROPPED_RX && !++rx_dropped_bytes)
+              --rx_dropped_bytes;
           }
           sw_barrier();
         }
@@ -312,78 +263,68 @@
         // have any issues writing to the UART TX register if it needs to!
       }
     }
-  #endif // SERIAL_XON_XOFF
+  }

   // Store the new head value - The main loop will retry until the value is stable
   rx_buffer.head = h;
 }

-#if TX_BUFFER_SIZE > 0
-
-  // (called with TX irqs disabled)
-  FORCE_INLINE void _tx_udr_empty_irq(void) {
-
+// (called with TX irqs disabled)
+template<typename Cfg>
+FORCE_INLINE void MarlinSerial<Cfg>::_tx_udr_empty_irq(void) {
+  if (Cfg::TX_SIZE > 0) {
     // Read positions
     uint8_t t = tx_buffer.tail;
     const uint8_t h = tx_buffer.head;

-    #if ENABLED(SERIAL_XON_XOFF)
+    if (Cfg::XONOFF) {
       // If an XON char is pending to be sent, do it now
       if (xon_xoff_state == XON_CHAR) {

         // Send the character
-        M_UDRx = XON_CHAR;
+        R_UDR = XON_CHAR;

         // clear the TXC bit -- "can be cleared by writing a one to its bit
         // location". This makes sure flush() won't return until the bytes
         // actually got written
-        SBI(M_UCSRxA, M_TXCx);
+        B_TXC = 1;

         // Remember we sent it.
         xon_xoff_state = XON_CHAR | XON_XOFF_CHAR_SENT;

         // If nothing else to transmit, just disable TX interrupts.
-        if (h == t) CBI(M_UCSRxB, M_UDRIEx); // (Non-atomic, could be reenabled by the main program, but eventually this will succeed)
+        if (h == t) B_UDRIE = 0; // (Non-atomic, could be reenabled by the main program, but eventually this will succeed)

         return;
       }
-    #endif
+    }

     // If nothing to transmit, just disable TX interrupts. This could
     // happen as the result of the non atomicity of the disabling of RX
     // interrupts that could end reenabling TX interrupts as a side effect.
     if (h == t) {
-      CBI(M_UCSRxB, M_UDRIEx); // (Non-atomic, could be reenabled by the main program, but eventually this will succeed)
+      B_UDRIE = 0; // (Non-atomic, could be reenabled by the main program, but eventually this will succeed)
       return;
     }

     // There is something to TX, Send the next byte
     const uint8_t c = tx_buffer.buffer[t];
-    t = (t + 1) & (TX_BUFFER_SIZE - 1);
-    M_UDRx = c;
+    t = (t + 1) & (Cfg::TX_SIZE - 1);
+    R_UDR = c;
     tx_buffer.tail = t;

     // Clear the TXC bit (by writing a one to its bit location).
     // Ensures flush() won't return until the bytes are actually written/
-    SBI(M_UCSRxA, M_TXCx);
+    B_TXC = 1;

     // Disable interrupts if there is nothing to transmit following this byte
-    if (h == t) CBI(M_UCSRxB, M_UDRIEx); // (Non-atomic, could be reenabled by the main program, but eventually this will succeed)
+    if (h == t) B_UDRIE = 0; // (Non-atomic, could be reenabled by the main program, but eventually this will succeed)
   }
-
-  #ifdef M_USARTx_UDRE_vect
-    ISR(M_USARTx_UDRE_vect) { _tx_udr_empty_irq(); }
-  #endif
-
-#endif // TX_BUFFER_SIZE
-
-#ifdef M_USARTx_RX_vect
-  ISR(M_USARTx_RX_vect) { store_rxd_char(); }
-#endif
+}

 // Public Methods
-
-void MarlinSerial::begin(const long baud) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::begin(const long baud) {
   uint16_t baud_setting;
   bool useU2X = true;

@@ -394,41 +335,41 @@
     if (baud == 57600) useU2X = false;
   #endif

+  R_UCSRA = 0;
   if (useU2X) {
-    M_UCSRxA = _BV(M_U2Xx);
+    B_U2X = 1;
     baud_setting = (F_CPU / 4 / baud - 1) / 2;
   }
-  else {
-    M_UCSRxA = 0;
+  else
     baud_setting = (F_CPU / 8 / baud - 1) / 2;
-  }

   // assign the baud_setting, a.k.a. ubbr (USART Baud Rate Register)
-  M_UBRRxH = baud_setting >> 8;
-  M_UBRRxL = baud_setting;
-
-  SBI(M_UCSRxB, M_RXENx);
-  SBI(M_UCSRxB, M_TXENx);
-  SBI(M_UCSRxB, M_RXCIEx);
-  #if TX_BUFFER_SIZE > 0
-    CBI(M_UCSRxB, M_UDRIEx);
-  #endif
+  R_UBRRH = baud_setting >> 8;
+  R_UBRRL = baud_setting;
+
+  B_RXEN = 1;
+  B_TXEN = 1;
+  B_RXCIE = 1;
+  if (Cfg::TX_SIZE > 0) B_UDRIE = 0;
   _written = false;
 }

-void MarlinSerial::end() {
-  CBI(M_UCSRxB, M_RXENx);
-  CBI(M_UCSRxB, M_TXENx);
-  CBI(M_UCSRxB, M_RXCIEx);
-  CBI(M_UCSRxB, M_UDRIEx);
+template<typename Cfg>
+void MarlinSerial<Cfg>::end() {
+  B_RXEN = 0;
+  B_TXEN = 0;
+  B_RXCIE = 0;
+  B_UDRIE = 0;
 }

-int MarlinSerial::peek(void) {
+template<typename Cfg>
+int MarlinSerial<Cfg>::peek(void) {
   const ring_buffer_pos_t h = atomic_read_rx_head(), t = rx_buffer.tail;
   return h == t ? -1 : rx_buffer.buffer[t];
 }

-int MarlinSerial::read(void) {
+template<typename Cfg>
+int MarlinSerial<Cfg>::read(void) {
   const ring_buffer_pos_t h = atomic_read_rx_head();

   // Read the tail. Main thread owns it, so it is safe to directly read it
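
The baud_setting expression is the standard AVR UBRR formula, UBRR = F_CPU/(8*baud) - 1 with U2X or F_CPU/(16*baud) - 1 without, written so that the integer math rounds to the nearest setting. Worked example: with F_CPU = 16 MHz and baud = 250000, the U2X path gives (16000000/4/250000 - 1)/2 = (16 - 1)/2 = 7, and the achieved rate F_CPU/(8*(7+1)) = 250000 baud is exact.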
@@ -439,42 +380,45 @@

   // Get the next char
   const int v = rx_buffer.buffer[t];
-  t = (ring_buffer_pos_t)(t + 1) & (RX_BUFFER_SIZE - 1);
+  t = (ring_buffer_pos_t)(t + 1) & (Cfg::RX_SIZE - 1);

   // Advance tail - Making sure the RX ISR will always get an stable value, even
   // if it interrupts the writing of the value of that variable in the middle.
   atomic_set_rx_tail(t);

-  #if ENABLED(SERIAL_XON_XOFF)
+  if (Cfg::XONOFF) {
     // If the XOFF char was sent, or about to be sent...
     if ((xon_xoff_state & XON_XOFF_CHAR_MASK) == XOFF_CHAR) {
       // Get count of bytes in the RX buffer
-      const ring_buffer_pos_t rx_count = (ring_buffer_pos_t)(h - t) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
-      if (rx_count < (RX_BUFFER_SIZE) / 10) {
-        #if TX_BUFFER_SIZE > 0
+      const ring_buffer_pos_t rx_count = (ring_buffer_pos_t)(h - t) & (ring_buffer_pos_t)(Cfg::RX_SIZE - 1);
+      if (rx_count < (Cfg::RX_SIZE) / 10) {
+        if (Cfg::TX_SIZE > 0) {
           // Signal we want an XON character to be sent.
           xon_xoff_state = XON_CHAR;
           // Enable TX ISR. Non atomic, but it will eventually enable them
-          SBI(M_UCSRxB, M_UDRIEx);
-        #else
+          B_UDRIE = 1;
+        }
+        else {
           // If not using TX interrupts, we must send the XON char now
           xon_xoff_state = XON_CHAR | XON_XOFF_CHAR_SENT;
-          while (!TEST(M_UCSRxA, M_UDREx)) sw_barrier();
-          M_UDRx = XON_CHAR;
-        #endif
+          while (!B_UDRE) sw_barrier();
+          R_UDR = XON_CHAR;
+        }
       }
     }
-  #endif
+  }

   return v;
 }

-ring_buffer_pos_t MarlinSerial::available(void) {
+template<typename Cfg>
+typename MarlinSerial<Cfg>::ring_buffer_pos_t MarlinSerial<Cfg>::available(void) {
   const ring_buffer_pos_t h = atomic_read_rx_head(), t = rx_buffer.tail;
-  return (ring_buffer_pos_t)(RX_BUFFER_SIZE + h - t) & (RX_BUFFER_SIZE - 1);
+  return (ring_buffer_pos_t)(Cfg::RX_SIZE + h - t) & (Cfg::RX_SIZE - 1);
 }

-void MarlinSerial::flush(void) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::flush(void) {

   // Set the tail to the head:
   // - Read the RX head index in a safe way. (See atomic_read_rx_head.)
@@ -482,26 +426,36 @@
   // if it interrupts the writing of the value of that variable in the middle.
   atomic_set_rx_tail(atomic_read_rx_head());

-  #if ENABLED(SERIAL_XON_XOFF)
+  if (Cfg::XONOFF) {
     // If the XOFF char was sent, or about to be sent...
     if ((xon_xoff_state & XON_XOFF_CHAR_MASK) == XOFF_CHAR) {
-      #if TX_BUFFER_SIZE > 0
+      if (Cfg::TX_SIZE > 0) {
         // Signal we want an XON character to be sent.
         xon_xoff_state = XON_CHAR;
         // Enable TX ISR. Non atomic, but it will eventually enable it.
-        SBI(M_UCSRxB, M_UDRIEx);
-      #else
+        B_UDRIE = 1;
+      }
+      else {
         // If not using TX interrupts, we must send the XON char now
         xon_xoff_state = XON_CHAR | XON_XOFF_CHAR_SENT;
-        while (!TEST(M_UCSRxA, M_UDREx)) sw_barrier();
-        M_UDRx = XON_CHAR;
-      #endif
+        while (!B_UDRE) sw_barrier();
+        R_UDR = XON_CHAR;
+      }
     }
-  #endif
+  }
 }

-#if TX_BUFFER_SIZE > 0
-  void MarlinSerial::write(const uint8_t c) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::write(const uint8_t c) {
+  if (Cfg::TX_SIZE == 0) {
+
+    _written = true;
+    while (!B_UDRE) sw_barrier();
+    R_UDR = c;
+
+  }
+  else {
+
     _written = true;

     // If the TX interrupts are disabled and the data register
@@ -511,17 +465,17 @@
     // interrupt overhead becomes a slowdown.
     // Yes, there is a race condition between the sending of the
     // XOFF char at the RX ISR, but it is properly handled there
-    if (!TEST(M_UCSRxB, M_UDRIEx) && TEST(M_UCSRxA, M_UDREx)) {
-      M_UDRx = c;
+    if (!B_UDRIE && B_UDRE) {
+      R_UDR = c;

       // clear the TXC bit -- "can be cleared by writing a one to its bit
       // location". This makes sure flush() won't return until the bytes
       // actually got written
-      SBI(M_UCSRxA, M_TXCx);
+      B_TXC = 1;
       return;
     }

-    const uint8_t i = (tx_buffer.head + 1) & (TX_BUFFER_SIZE - 1);
+    const uint8_t i = (tx_buffer.head + 1) & (Cfg::TX_SIZE - 1);

     // If global interrupts are disabled (as the result of being called from an ISR)...
     if (!ISRS_ENABLED()) {
@@ -530,7 +484,7 @@
       while (i == tx_buffer.tail) {

         // If we can transmit another byte, do it.
-        if (TEST(M_UCSRxA, M_UDREx)) _tx_udr_empty_irq();
+        if (B_UDRE) _tx_udr_empty_irq();

         // Make sure compiler rereads tx_buffer.tail
         sw_barrier();
@@ -538,7 +492,7 @@
     }
     else {
       // Interrupts are enabled, just wait until there is space
-      while (i == tx_buffer.tail) { sw_barrier(); }
+      while (i == tx_buffer.tail) sw_barrier();
     }

     // Store new char. head is always safe to move
@@ -546,10 +500,27 @@
     tx_buffer.head = i;

     // Enable TX ISR - Non atomic, but it will eventually enable TX ISR
-    SBI(M_UCSRxB, M_UDRIEx);
+    B_UDRIE = 1;
   }
+}
+
+template<typename Cfg>
+void MarlinSerial<Cfg>::flushTX(void) {
+
+  if (Cfg::TX_SIZE == 0) {
+    // No bytes written, no need to flush. This special case is needed since there's
+    // no way to force the TXC (transmit complete) bit to 1 during initialization.
+    if (!_written) return;
+
+    // Wait until everything was transmitted
+    while (!B_TXC) sw_barrier();
+
+    // At this point nothing is queued anymore (DRIE is disabled) and
+    // the hardware finished transmission (TXC is set).
+
+  }
+  else {

-  void MarlinSerial::flushTX(void) {
     // No bytes written, no need to flush. This special case is needed since there's
     // no way to force the TXC (transmit complete) bit to 1 during initialization.
     if (!_written) return;
@@ -558,11 +529,10 @@
     if (!ISRS_ENABLED()) {

       // Wait until everything was transmitted - We must do polling, as interrupts are disabled
-      while (tx_buffer.head != tx_buffer.tail || !TEST(M_UCSRxA, M_TXCx)) {
+      while (tx_buffer.head != tx_buffer.tail || !B_TXC) {

         // If there is more space, send an extra character
-        if (TEST(M_UCSRxA, M_UDREx))
-          _tx_udr_empty_irq();
+        if (B_UDRE) _tx_udr_empty_irq();

         sw_barrier();
       }
@@ -570,55 +540,40 @@
     }
     else {
       // Wait until everything was transmitted
-      while (tx_buffer.head != tx_buffer.tail || !TEST(M_UCSRxA, M_TXCx)) sw_barrier();
+      while (tx_buffer.head != tx_buffer.tail || !B_TXC) sw_barrier();
     }

     // At this point nothing is queued anymore (DRIE is disabled) and
     // the hardware finished transmission (TXC is set).
   }
-
-#else // TX_BUFFER_SIZE == 0
-
-  void MarlinSerial::write(const uint8_t c) {
-    _written = true;
-    while (!TEST(M_UCSRxA, M_UDREx)) sw_barrier();
-    M_UDRx = c;
-  }
-
-  void MarlinSerial::flushTX(void) {
-    // No bytes written, no need to flush. This special case is needed since there's
-    // no way to force the TXC (transmit complete) bit to 1 during initialization.
-    if (!_written) return;
-
-    // Wait until everything was transmitted
-    while (!TEST(M_UCSRxA, M_TXCx)) sw_barrier();
-
-    // At this point nothing is queued anymore (DRIE is disabled) and
-    // the hardware finished transmission (TXC is set).
-  }
-#endif // TX_BUFFER_SIZE == 0
+}

 /**
  * Imports from print.h
  */

-void MarlinSerial::print(char c, int base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::print(char c, int base) {
   print((long)c, base);
 }

-void MarlinSerial::print(unsigned char b, int base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::print(unsigned char b, int base) {
   print((unsigned long)b, base);
 }

-void MarlinSerial::print(int n, int base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::print(int n, int base) {
   print((long)n, base);
 }

-void MarlinSerial::print(unsigned int n, int base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::print(unsigned int n, int base) {
   print((unsigned long)n, base);
 }

-void MarlinSerial::print(long n, int base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::print(long n, int base) {
   if (base == 0) write(n);
   else if (base == 10) {
     if (n < 0) { print('-'); n = -n; }
@@ -628,68 +583,81 @@
     printNumber(n, base);
 }

-void MarlinSerial::print(unsigned long n, int base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::print(unsigned long n, int base) {
   if (base == 0) write(n);
   else printNumber(n, base);
 }

-void MarlinSerial::print(double n, int digits) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::print(double n, int digits) {
   printFloat(n, digits);
 }

-void MarlinSerial::println(void) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::println(void) {
   print('\r');
   print('\n');
 }

-void MarlinSerial::println(const String& s) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::println(const String& s) {
   print(s);
   println();
 }

-void MarlinSerial::println(const char c[]) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::println(const char c[]) {
   print(c);
   println();
 }

-void MarlinSerial::println(char c, int base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::println(char c, int base) {
   print(c, base);
   println();
 }

-void MarlinSerial::println(unsigned char b, int base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::println(unsigned char b, int base) {
   print(b, base);
   println();
 }

-void MarlinSerial::println(int n, int base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::println(int n, int base) {
   print(n, base);
   println();
 }

-void MarlinSerial::println(unsigned int n, int base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::println(unsigned int n, int base) {
   print(n, base);
   println();
 }

-void MarlinSerial::println(long n, int base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::println(long n, int base) {
   print(n, base);
   println();
 }

-void MarlinSerial::println(unsigned long n, int base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::println(unsigned long n, int base) {
   print(n, base);
   println();
 }

-void MarlinSerial::println(double n, int digits) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::println(double n, int digits) {
   print(n, digits);
   println();
 }

 // Private Methods

-void MarlinSerial::printNumber(unsigned long n, uint8_t base) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::printNumber(unsigned long n, uint8_t base) {
   if (n) {
     unsigned char buf[8 * sizeof(long)]; // Enough space for base 2
     int8_t i = 0;
@@ -704,7 +672,8 @@
     print('0');
 }

-void MarlinSerial::printFloat(double number, uint8_t digits) {
+template<typename Cfg>
+void MarlinSerial<Cfg>::printFloat(double number, uint8_t digits) {
   // Handle negative numbers
   if (number < 0.0) {
     print('-');
@@ -713,9 +682,7 @@

   // Round correctly so that print(1.999, 2) prints as "2.00"
   double rounding = 0.5;
-  for (uint8_t i = 0; i < digits; ++i)
-    rounding *= 0.1;
-
+  for (uint8_t i = 0; i < digits; ++i) rounding *= 0.1;
   number += rounding;

   // Extract the integer part of the number and print it
@@ -736,8 +703,20 @@
   }
 }

+// Hookup ISR handlers
+ISR(SERIAL_REGNAME(USART,SERIAL_PORT,_RX_vect)) {
+  MarlinSerial<MarlinSerialCfg>::store_rxd_char();
+}
+
+ISR(SERIAL_REGNAME(USART,SERIAL_PORT,_UDRE_vect)) {
+  MarlinSerial<MarlinSerialCfg>::_tx_udr_empty_irq();
+}
+
 // Preinstantiate
-MarlinSerial customizedSerial;
+template class MarlinSerial<MarlinSerialCfg>;
+
+// Instantiate
+MarlinSerial<MarlinSerialCfg> customizedSerial;

 #endif // !USBCON && (UBRRH || UBRR0H || UBRR1H || UBRR2H || UBRR3H)
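
With the template instantiated for `MarlinSerialCfg`, the two `ISR()` bodies above are the only port-specific glue left: `SERIAL_REGNAME(USART,SERIAL_PORT,_RX_vect)` token-pastes the configured port number into the vector name (for example `USART0_RX_vect` when `SERIAL_PORT` is 0 on an ATmega2560). Callers keep using the preinstantiated `customizedSerial` object as before; a minimal usage sketch (hypothetical caller, not part of the diff):

```cpp
// Hypothetical demo of the public API shown above.
void serial_demo() {
  customizedSerial.begin(BAUDRATE);        // program UBRR, enable RX/TX and the RX ISR
  customizedSerial.println("start");       // buffered TX (blocking when Cfg::TX_SIZE == 0)
  while (customizedSerial.available())     // bytes queued by store_rxd_char() in the RX ISR
    customizedSerial.write((uint8_t)customizedSerial.read());
  customizedSerial.flushTX();              // wait until the hardware finished transmitting
}
```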