auto merge of #4791 : jbclements/rust/demodeing-and-deGCing, r=jbclements,brson

r?

It looks to me like the string_reader and tt_reader structs are
GC pointers only because they predate the modern borrow system.
This commit leaves the type names string_reader and tt_reader alone
(they still refer to GC'd pointers), but internally the functions
now use borrowed pointers to refer to these structures. My guess
is that this change could be pushed outward so that the GC'd
pointers disappear entirely, but that looks like a larger change.
Actually, I'm delighted at how quick this change was.
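For readers unfamiliar with the pre-1.0 sigils: `@T` was a task-local
garbage-collected pointer, and `&&x: T` was the old by-reference argument
mode. The change has a direct modern analogue; here is a minimal sketch in
modern Rust, with hypothetical names and `Rc` standing in for the long-gone
`@` sigil:

use std::rc::Rc;

// `ReaderState` plays the role of tt_reader_ / string_reader_ (hypothetical).
struct ReaderState {
    pos: usize,
}

// The handle plays the role of the @-pointer alias; Rc stands in for @.
type ReaderHandle = Rc<ReaderState>;

// Before: functions took the GC'd handle itself.
fn byte_offset_old(rdr: ReaderHandle) -> usize {
    rdr.pos
}

// After: functions borrow the pointee; no refcount or GC traffic at the call.
fn byte_offset_new(rdr: &ReaderState) -> usize {
    rdr.pos
}

fn main() {
    let handle: ReaderHandle = Rc::new(ReaderState { pos: 7 });
    assert_eq!(byte_offset_old(handle.clone()), 7);
    assert_eq!(byte_offset_new(&handle), 7); // deref coercion: &Rc<T> -> &T
}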
Committed by bors on 2013-02-07 13:58:32 -08:00
commit 4272765830
2 changed files with 70 additions and 67 deletions

src/libsyntax/ext/tt/transcribe.rs

@@ -39,7 +39,8 @@ type tt_frame = @{
up: tt_frame_up,
};
pub type tt_reader = @{
pub type tt_reader = @tt_reader_;
pub type tt_reader_ = {
sp_diag: span_handler,
interner: @ident_interner,
mut cur: tt_frame,
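The hunk above is the enabling move: the anonymous record behind `@{...}` is
split into a named record type `tt_reader_` plus a pointer alias
`tt_reader = @tt_reader_`, so the rest of the file can take `&tt_reader_`.
A rough modern equivalent of the alias split (hypothetical names):

use std::rc::Rc;

struct TtReaderInner { cur_idx: usize } // the pointee, like tt_reader_
type TtReader = Rc<TtReaderInner>;      // the handle, like @tt_reader_

// Helpers can now name the pointee and accept a plain borrow:
fn cur_idx(r: &TtReaderInner) -> usize { r.cur_idx }

fn demo() {
    let r: TtReader = Rc::new(TtReaderInner { cur_idx: 0 });
    assert_eq!(cur_idx(&r), 0); // the handle coerces to &TtReaderInner
}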
@@ -87,7 +88,7 @@ pure fn dup_tt_frame(&&f: tt_frame) -> tt_frame {
}
}
pub pure fn dup_tt_reader(&&r: tt_reader) -> tt_reader {
pub pure fn dup_tt_reader(r: &tt_reader_) -> tt_reader {
@{sp_diag: r.sp_diag, interner: r.interner,
mut cur: dup_tt_frame(r.cur),
interpolations: r.interpolations,
@@ -96,7 +97,7 @@ pub pure fn dup_tt_reader(&&r: tt_reader) -> tt_reader {
}
pure fn lookup_cur_matched_by_matched(r: tt_reader,
pure fn lookup_cur_matched_by_matched(r: &tt_reader_,
start: @named_match) -> @named_match {
pure fn red(+ad: @named_match, idx: &uint) -> @named_match {
match *ad {
@@ -110,15 +111,15 @@ pure fn lookup_cur_matched_by_matched(r: tt_reader,
vec::foldl(start, r.repeat_idx, red)
}
fn lookup_cur_matched(r: tt_reader, name: ident) -> @named_match {
fn lookup_cur_matched(r: &tt_reader_, name: ident) -> @named_match {
lookup_cur_matched_by_matched(r, r.interpolations.get(&name))
}
enum lis {
lis_unconstrained, lis_constraint(uint, ident), lis_contradiction(~str)
}
fn lockstep_iter_size(t: token_tree, r: tt_reader) -> lis {
fn lis_merge(lhs: lis, rhs: lis, r: tt_reader) -> lis {
fn lockstep_iter_size(t: token_tree, r: &tt_reader_) -> lis {
fn lis_merge(lhs: lis, rhs: lis, r: &tt_reader_) -> lis {
match lhs {
lis_unconstrained => rhs,
lis_contradiction(_) => lhs,
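For context, `lookup_cur_matched_by_matched` is the heart of transcription
here: `repeat_idx` records the current iteration index at each repetition
depth, and `vec::foldl(start, r.repeat_idx, red)` descends one sequence
level per entry. A hedged modern-Rust sketch of that fold (names only
approximate the originals; this is not the real API):

enum NamedMatch {
    MatchedSeq(Vec<NamedMatch>),
    MatchedLeaf(String),
}

// Each entry of repeat_idx selects one element at the matching sequence
// depth, exactly the shape of vec::foldl(start, r.repeat_idx, red) above.
fn lookup<'a>(start: &'a NamedMatch, repeat_idx: &[usize]) -> &'a NamedMatch {
    repeat_idx.iter().fold(start, |ad, &idx| match ad {
        NamedMatch::MatchedSeq(ads) => &ads[idx],
        NamedMatch::MatchedLeaf(_) => panic!("leaf where a sequence was expected"),
    })
}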
@@ -150,7 +151,7 @@ fn lockstep_iter_size(t: token_tree, r: tt_reader) -> lis {
}
pub fn tt_next_token(&&r: tt_reader) -> TokenAndSpan {
pub fn tt_next_token(r: &tt_reader_) -> TokenAndSpan {
let ret_val = TokenAndSpan { tok: r.cur_tok, sp: r.cur_span };
while r.cur.idx >= r.cur.readme.len() {
/* done with this set; pop or repeat? */
@@ -199,25 +200,26 @@ pub fn tt_next_token(&&r: tt_reader) -> TokenAndSpan {
return ret_val;
}
tt_seq(sp, ref tts, ref sep, zerok) => {
match lockstep_iter_size(tt_seq(sp, (*tts), (*sep), zerok), r) {
lis_unconstrained => {
r.sp_diag.span_fatal(
match lockstep_iter_size(tt_seq(sp, (*tts), (*sep), zerok), r) {
lis_unconstrained => {
r.sp_diag.span_fatal(
sp, /* blame macro writer */
~"attempted to repeat an expression containing no syntax \
variables matched as repeating at this depth");
}
lis_contradiction(ref msg) => {
/* FIXME #2887 blame macro invoker instead*/
r.sp_diag.span_fatal(sp, (*msg));
}
lis_constraint(len, _) => {
if len == 0 {
if !zerok {
~"attempted to repeat an expression \
containing no syntax \
variables matched as repeating at this depth");
}
lis_contradiction(ref msg) => {
/* FIXME #2887 blame macro invoker instead*/
r.sp_diag.span_fatal(sp, (*msg));
}
lis_constraint(len, _) => {
if len == 0 {
if !zerok {
r.sp_diag.span_fatal(sp, /* FIXME #2887 blame invoker
*/
*/
~"this must repeat at least \
once");
}
once");
}
r.cur.idx += 1u;
return tt_next_token(r);

src/libsyntax/parse/lexer.rs

@@ -24,24 +24,25 @@ use core::either;
use core::str;
use core::u64;
pub use ext::tt::transcribe::{tt_reader, new_tt_reader};
pub use ext::tt::transcribe::{tt_reader, tt_reader_, new_tt_reader};
use std;
//use std;
pub trait reader {
fn is_eof() -> bool;
fn next_token() -> TokenAndSpan;
fn fatal(~str) -> !;
fn span_diag() -> span_handler;
pure fn interner() -> @token::ident_interner;
fn peek() -> TokenAndSpan;
fn dup() -> reader;
fn is_eof(&self) -> bool;
fn next_token(&self) -> TokenAndSpan;
fn fatal(&self, ~str) -> !;
fn span_diag(&self) -> span_handler;
pure fn interner(&self) -> @token::ident_interner;
fn peek(&self) -> TokenAndSpan;
fn dup(&self) -> reader;
}
#[deriving_eq]
pub struct TokenAndSpan {tok: token::Token, sp: span}
pub type string_reader = @{
pub type string_reader = @string_reader_;
pub type string_reader_ = {
span_diagnostic: span_handler,
src: @~str,
// The absolute offset within the codemap of the next character to read
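The trait change above is the same demoding applied to methods: every
signature in the `reader` trait gains an explicit `&self` instead of the
old implicit-self default. In modern Rust the explicit form is mandatory;
a minimal sketch of the shape (illustrative types, not the real trait):

trait Reader {
    fn is_eof(&self) -> bool;
    fn peek(&self) -> Option<char>;
}

struct StringReader { src: Vec<char>, pos: usize }

impl Reader for StringReader {
    fn is_eof(&self) -> bool { self.pos >= self.src.len() }
    fn peek(&self) -> Option<char> { self.src.get(self.pos).copied() }
}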
@@ -90,7 +91,7 @@ pub fn new_low_level_string_reader(span_diagnostic: span_handler,
// duplicating the string reader is probably a bad idea, in
// that using them will cause interleaved pushes of line
// offsets to the underlying filemap...
fn dup_string_reader(&&r: string_reader) -> string_reader {
fn dup_string_reader(r: &string_reader_) -> string_reader {
@{span_diagnostic: r.span_diagnostic, src: r.src,
mut pos: r.pos,
mut last_pos: r.last_pos,
@@ -99,28 +100,28 @@ fn dup_string_reader(&&r: string_reader) -> string_reader {
mut peek_tok: r.peek_tok, mut peek_span: r.peek_span}
}
impl string_reader: reader {
fn is_eof() -> bool { is_eof(self) }
impl string_reader_: reader {
fn is_eof(&self) -> bool { is_eof(self) }
// return the next token. EFFECT: advances the string_reader.
fn next_token() -> TokenAndSpan {
fn next_token(&self) -> TokenAndSpan {
let ret_val = TokenAndSpan {tok: self.peek_tok, sp: self.peek_span};
string_advance_token(self);
return ret_val;
}
fn fatal(m: ~str) -> ! {
fn fatal(&self, m: ~str) -> ! {
self.span_diagnostic.span_fatal(copy self.peek_span, m)
}
fn span_diag() -> span_handler { self.span_diagnostic }
pure fn interner() -> @token::ident_interner { self.interner }
fn peek() -> TokenAndSpan {
fn span_diag(&self) -> span_handler { self.span_diagnostic }
pure fn interner(&self) -> @token::ident_interner { self.interner }
fn peek(&self) -> TokenAndSpan {
TokenAndSpan {tok: self.peek_tok, sp: self.peek_span}
}
fn dup() -> reader { dup_string_reader(self) as reader }
fn dup(&self) -> reader { dup_string_reader(self) as reader }
}
pub impl tt_reader: reader {
fn is_eof() -> bool { self.cur_tok == token::EOF }
fn next_token() -> TokenAndSpan {
pub impl tt_reader_: reader {
fn is_eof(&self) -> bool { self.cur_tok == token::EOF }
fn next_token(&self) -> TokenAndSpan {
/* weird resolve bug: if the following `if`, or any of its
statements are removed, we get resolution errors */
if false {
@@ -129,19 +130,19 @@ pub impl tt_reader: reader {
}
tt_next_token(self)
}
fn fatal(m: ~str) -> ! {
fn fatal(&self, m: ~str) -> ! {
self.sp_diag.span_fatal(copy self.cur_span, m);
}
fn span_diag() -> span_handler { self.sp_diag }
pure fn interner() -> @token::ident_interner { self.interner }
fn peek() -> TokenAndSpan {
fn span_diag(&self) -> span_handler { self.sp_diag }
pure fn interner(&self) -> @token::ident_interner { self.interner }
fn peek(&self) -> TokenAndSpan {
TokenAndSpan { tok: self.cur_tok, sp: self.cur_span }
}
fn dup() -> reader { dup_tt_reader(self) as reader }
fn dup(&self) -> reader { dup_tt_reader(self) as reader }
}
// EFFECT: advance peek_tok and peek_span to refer to the next token.
fn string_advance_token(&&r: string_reader) {
fn string_advance_token(r: &string_reader_) {
match (consume_whitespace_and_comments(r)) {
Some(comment) => {
r.peek_tok = comment.tok;
@@ -159,11 +160,11 @@ fn string_advance_token(&&r: string_reader) {
}
}
fn byte_offset(rdr: string_reader) -> BytePos {
fn byte_offset(rdr: &string_reader_) -> BytePos {
(rdr.pos - rdr.filemap.start_pos)
}
pub fn get_str_from(rdr: string_reader, start: BytePos) -> ~str {
pub fn get_str_from(rdr: &string_reader_, start: BytePos) -> ~str {
unsafe {
// I'm pretty skeptical about this subtraction. What if there's a
// multi-byte character before the mark?
@@ -174,7 +175,7 @@ pub fn get_str_from(rdr: string_reader, start: BytePos) -> ~str {
// EFFECT: advance the StringReader by one character. If a newline is
// discovered, add it to the FileMap's list of line start offsets.
pub fn bump(rdr: string_reader) {
pub fn bump(rdr: &string_reader_) {
rdr.last_pos = rdr.pos;
let current_byte_offset = byte_offset(rdr).to_uint();
if current_byte_offset < (*rdr.src).len() {
@@ -198,10 +199,10 @@ pub fn bump(rdr: string_reader) {
rdr.curr = -1 as char;
}
}
pub fn is_eof(rdr: string_reader) -> bool {
pub fn is_eof(rdr: &string_reader_) -> bool {
rdr.curr == -1 as char
}
pub fn nextch(rdr: string_reader) -> char {
pub fn nextch(rdr: &string_reader_) -> char {
let offset = byte_offset(rdr).to_uint();
if offset < (*rdr.src).len() {
return str::char_at(*rdr.src, offset);
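`bump`, `is_eof`, and `nextch` form the lexer's low-level cursor: `bump`
advances one character (also recording line-start offsets, per the comment
above), `nextch` peeks one character ahead, and EOF is signalled by the
`-1 as char` sentinel. A sketch of the same cursor in modern Rust, using
`Option<char>` where the original uses the sentinel (hypothetical helper,
line-start bookkeeping omitted):

struct Cursor<'a> {
    src: &'a str,
    pos: usize,         // byte offset of `curr` within `src`
    curr: Option<char>, // None plays the role of `-1 as char`
}

fn new_cursor(src: &str) -> Cursor {
    Cursor { src, pos: 0, curr: src.chars().next() }
}

// Advance past the current char and load the next one (EFFECT: mutates).
fn bump(rdr: &mut Cursor) {
    rdr.pos += rdr.curr.map_or(0, char::len_utf8);
    rdr.curr = rdr.src[rdr.pos..].chars().next();
}

fn is_eof(rdr: &Cursor) -> bool { rdr.curr.is_none() }

// Peek at the character after `curr` without advancing.
fn nextch(rdr: &Cursor) -> Option<char> {
    let skip = rdr.curr.map_or(0, char::len_utf8);
    rdr.src[rdr.pos + skip..].chars().next()
}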
@@ -246,7 +247,7 @@ fn is_bin_digit(c: char) -> bool { return c == '0' || c == '1'; }
// EFFECT: eats whitespace and comments.
// returns a Some(sugared-doc-attr) if one exists, None otherwise.
fn consume_whitespace_and_comments(rdr: string_reader)
fn consume_whitespace_and_comments(rdr: &string_reader_)
-> Option<TokenAndSpan> {
while is_whitespace(rdr.curr) { bump(rdr); }
return consume_any_line_comment(rdr);
@@ -255,7 +256,7 @@ fn consume_whitespace_and_comments(rdr: string_reader)
// PRECONDITION: rdr.curr is not whitespace
// EFFECT: eats any kind of comment.
// returns a Some(sugared-doc-attr) if one exists, None otherwise
fn consume_any_line_comment(rdr: string_reader)
fn consume_any_line_comment(rdr: &string_reader_)
-> Option<TokenAndSpan> {
if rdr.curr == '/' {
match nextch(rdr) {
@@ -298,7 +299,7 @@ fn consume_any_line_comment(rdr: string_reader)
}
// might return a sugared-doc-attr
fn consume_block_comment(rdr: string_reader)
fn consume_block_comment(rdr: &string_reader_)
-> Option<TokenAndSpan> {
// block comments starting with "/**" or "/*!" are doc-comments
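As that comment says, a block comment counts as a doc-comment when it opens
with `/**` or `/*!`. The test is simple enough to state directly (a sketch,
not the lexer's own helper):

fn is_block_doc_comment(s: &str) -> bool {
    s.starts_with("/**") || s.starts_with("/*!")
}

fn demo() {
    assert!(is_block_doc_comment("/** docs */"));
    assert!(!is_block_doc_comment("/* plain */"));
}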
@@ -337,7 +338,7 @@ fn consume_block_comment(rdr: string_reader)
return consume_whitespace_and_comments(rdr);
}
fn scan_exponent(rdr: string_reader) -> Option<~str> {
fn scan_exponent(rdr: &string_reader_) -> Option<~str> {
let mut c = rdr.curr;
let mut rslt = ~"";
if c == 'e' || c == 'E' {
@@ -355,7 +356,7 @@ fn scan_exponent(rdr: string_reader) -> Option<~str> {
} else { return None::<~str>; }
}
fn scan_digits(rdr: string_reader, radix: uint) -> ~str {
fn scan_digits(rdr: &string_reader_, radix: uint) -> ~str {
let mut rslt = ~"";
loop {
let c = rdr.curr;
@@ -370,7 +371,7 @@ fn scan_digits(rdr: string_reader, radix: uint) -> ~str {
};
}
fn scan_number(c: char, rdr: string_reader) -> token::Token {
fn scan_number(c: char, rdr: &string_reader_) -> token::Token {
let mut num_str, base = 10u, c = c, n = nextch(rdr);
if c == '0' && n == 'x' {
bump(rdr);
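`scan_digits` loops, consuming characters for as long as they are valid
digits in the requested radix; `scan_number` drives it after sniffing
prefixes like `0x`. A minimal modern sketch of the digit loop (an assumed
standalone helper; the real one reads through the string_reader):

use std::iter::Peekable;

fn scan_digits<I: Iterator<Item = char>>(chars: &mut Peekable<I>, radix: u32) -> String {
    let mut rslt = String::new();
    while let Some(&c) = chars.peek() {
        if !c.is_digit(radix) { break; }
        rslt.push(c);
        chars.next(); // consume the digit, the analogue of bump(rdr)
    }
    rslt
}

fn demo() {
    let mut it = "1fz".chars().peekable();
    assert_eq!(scan_digits(&mut it, 16), "1f"); // stops at the non-digit 'z'
}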
@@ -486,7 +487,7 @@ fn scan_number(c: char, rdr: string_reader) -> token::Token {
}
}
fn scan_numeric_escape(rdr: string_reader, n_hex_digits: uint) -> char {
fn scan_numeric_escape(rdr: &string_reader_, n_hex_digits: uint) -> char {
let mut accum_int = 0, i = n_hex_digits;
while i != 0u {
let n = rdr.curr;
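`scan_numeric_escape` folds exactly `n_hex_digits` hex digits into a single
char, bumping past each one. The same accumulation in modern Rust (a hedged
standalone sketch; the original reads from the reader and calls fatal on
bad digits):

fn scan_numeric_escape(digits: &str, n_hex_digits: usize) -> Option<char> {
    let mut accum_int: u32 = 0;
    for c in digits.chars().take(n_hex_digits) {
        accum_int = accum_int * 16 + c.to_digit(16)?; // reject non-hex digits
    }
    char::from_u32(accum_int)
}

fn demo() {
    assert_eq!(scan_numeric_escape("7a", 2), Some('z')); // 0x7a == 'z'
}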
@@ -501,7 +502,7 @@ fn scan_numeric_escape(rdr: string_reader, n_hex_digits: uint) -> char {
return accum_int as char;
}
fn next_token_inner(rdr: string_reader) -> token::Token {
fn next_token_inner(rdr: &string_reader_) -> token::Token {
let mut accum_str = ~"";
let mut c = rdr.curr;
if (c >= 'a' && c <= 'z')
@@ -526,7 +527,7 @@ fn next_token_inner(rdr: string_reader) -> token::Token {
if is_dec_digit(c) {
return scan_number(c, rdr);
}
fn binop(rdr: string_reader, op: token::binop) -> token::Token {
fn binop(rdr: &string_reader_, op: token::binop) -> token::Token {
bump(rdr);
if rdr.curr == '=' {
bump(rdr);
@@ -719,7 +720,7 @@ fn next_token_inner(rdr: string_reader) -> token::Token {
}
}
fn consume_whitespace(rdr: string_reader) {
fn consume_whitespace(rdr: &string_reader_) {
while is_whitespace(rdr.curr) && !is_eof(rdr) { bump(rdr); }
}
@@ -730,7 +731,7 @@ pub mod test {
use util::interner;
use diagnostic;
use util::testing::{check_equal, check_equal_ptr};
#[test] fn t1 () {
#[tetst] fn t1 () {
let teststr =
@~"/* my source file */
fn main() { io::println(~\"zebra\"); }\n";