optimizations

Daniel Sipka 2015-04-15 22:32:27 +02:00
parent 48fbaeb3c9
commit 127609f392
5 changed files with 51 additions and 49 deletions

render_context.cpp

@@ -31,7 +31,8 @@ render_context::render_context(
   delim_start{"{{"},
   delim_end{"}}"}
 {
-  state.push(std::unique_ptr<state::render_state>(new state::outside_section));
+  state.push(std::unique_ptr<state::render_state>(
+      new state::outside_section));
 }
 
 const mstch::node& render_context::find_node(
@@ -79,7 +80,7 @@ void render_context::tokenize(const std::string& t, std::vector<token>& toks) {
       else
         pstate = parse_state::start;
     } else if(pstate == parse_state::in_del) {
-      if (*it== '{') {
+      if (*it == '{') {
        pstate = parse_state::in_esccontent;
      } else if (*it == delim_end[0]) {
        pstate = parse_state::in_del_end;
@@ -97,7 +98,7 @@ void render_context::tokenize(const std::string& t, std::vector<token>& toks) {
         pstate = parse_state::start;
         toks.push_back({false, false, ws_only, {tok_start, tok_end}});
         toks.push_back({true, false, false,
-            {tok_end +delim_start.size(), it - delim_end.size() +1}});
+            {tok_end+delim_start.size(), it-delim_end.size()+1}});
         ws_only = true;
         tok_start = it + 1;
       } else {
@@ -116,19 +117,18 @@ void render_context::strip_whitespace(std::vector<token>& tokens) {
     if (type != token::type::text && type != token::type::variable &&
         type != token::type::unescaped_variable)
       has_tag = true;
-    else if (!(*it).is_ws_only())
+    else if (!(*it).ws_only())
       non_space = true;
-    if ((*it).is_eol()) {
+    if ((*it).eol()) {
       if (has_tag && !non_space)
         for (auto line_it = line_begin; line_it != it + 1; ++line_it)
-          if ((*line_it).is_ws_only())
-            (*line_it).mark();
+          if ((*line_it).ws_only()) (*line_it).mark();
       non_space = has_tag = false;
       line_begin = it + 1;
     }
   }
   for (auto it = tokens.begin(); it != tokens.end();)
-    ((*it).is_marked())?(it = tokens.erase(it)):(++it);
+    ((*it).marked())?(it = tokens.erase(it)):(++it);
 }
 
 std::string render_context::render(const std::string& tmplt) {
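Note: the final loop in strip_whitespace above erases marked tokens while iterating, using a ternary to choose between erase and advance. A minimal standalone sketch of the same clean-up written with the erase–remove idiom (hypothetical tok / drop_marked names, not code from this commit):

    #include <algorithm>
    #include <vector>

    struct tok { bool marked; };  // stand-in for mstch::token's mark flag

    // Remove every marked token in one pass; equivalent in effect to the
    // erase-while-iterating loop at the end of strip_whitespace.
    void drop_marked(std::vector<tok>& tokens) {
      tokens.erase(std::remove_if(tokens.begin(), tokens.end(),
                                  [](const tok& t) { return t.marked; }),
                   tokens.end());
    }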

token.cpp

@@ -1,4 +1,5 @@
 #include "token.hpp"
+#include "utils.hpp"
 
 using namespace mstch;
 
@@ -14,37 +15,23 @@ token::type token::token_info(char c) {
   }
 }
 
-token::token(bool is_tag, bool eol, bool ws_only, const std::string& raw_val):
-  eol(eol), ws_only(ws_only), marked(false)
+token::token(bool is_tag, bool eol, bool ws_only, const std::string& str):
+  m_eol(eol), m_ws_only(ws_only), m_marked(false)
 {
   if(is_tag) {
-    auto content_begin = raw_val.begin(), content_end = raw_val.end();
-    parse_state state = parse_state::prews;
-    if(*content_begin == '{' && *(content_end - 1) == '}') {
-      state = parse_state::postws;
-      type_val = type::unescaped_variable;
-      ++content_begin;
-      --content_end;
-    }
-    for(auto it = content_begin; it != content_end;) {
-      if(state == parse_state::prews && *it != ' ') {
-        state = parse_state::postws;
-        if((type_val = token_info(*it++)) == type::variable) {
-          state = parse_state::content;
-          content_begin = it -1;
-        }
-      } else if(state == parse_state::postws && *it != ' ') {
-        content_begin = it++;
-        state = parse_state::content;
-      } else if(state == parse_state::content && *it == ' ') {
-        content_end = it;
+    if(str[0] == '{' && str[str.size() - 1] == '}') {
+      m_type = type::unescaped_variable;
+      m_content = {first_not_ws(str.begin() + 1, str.end()),
+          first_not_ws(str.rbegin() + 1, str.rend()) + 1};
     } else {
-      ++it;
+      auto first = first_not_ws(str.begin(), str.end());
+      m_type = token_info(*first);
+      if(m_type != type::variable)
+        first = first_not_ws(first + 1, str.end());
+      m_content = {first, first_not_ws(str.rbegin(), str.rend()) + 1};
     }
-    }
-    content_val = {content_begin, content_end};
   } else {
-    type_val = type::text;
-    content_val = raw_val;
+    m_type = type::text;
+    m_content = str;
   }
 }
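Note: a rough usage sketch of the rewritten constructor (added for illustration, not part of the commit), assuming token_info() maps '#' to type::section_open and characters with no special meaning to type::variable:

    #include <cassert>
    #include "token.hpp"

    int main() {
      using mstch::token;
      // Tag bodies as they appear between the {{ }} delimiters.
      token plain(true, false, false, "  name  ");      // variable -> "name"
      token section(true, false, false, "#  items  ");  // section_open -> "items"
      token raw(true, false, false, "{ amount }");      // unescaped_variable -> "amount"
      assert(plain.content() == "name");
      assert(section.content() == "items");
      assert(raw.content() == "amount");
    }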

token.hpp

@@ -10,20 +10,19 @@ namespace mstch {
       text, variable, section_open, section_close, inverted_section_open,
       unescaped_variable, comment, partial
     };
-    token(bool is_tag, bool eol, bool ws_only, const std::string& raw_val);
-    type token_type() const { return type_val; };
-    const std::string& content() const { return content_val; };
-    bool is_eol() const { return eol; }
-    bool is_ws_only() const { return ws_only; }
-    bool is_marked() const { return marked; }
-    void mark() { marked = true; };
+    token(bool is_tag, bool eol, bool ws_only, const std::string& str);
+    type token_type() const { return m_type; };
+    const std::string& content() const { return m_content; };
+    bool eol() const { return m_eol; }
+    bool ws_only() const { return m_ws_only; }
+    bool marked() const { return m_marked; }
+    void mark() { m_marked = true; };
   private:
-    enum class parse_state { prews, postws, content };
-    type type_val;
-    std::string content_val;
-    bool eol;
-    bool ws_only;
-    bool marked;
+    type m_type;
+    std::string m_content;
+    bool m_eol;
+    bool m_ws_only;
+    bool m_marked;
     type token_info(char c);
   };
 }

utils.cpp

@@ -2,6 +2,18 @@
 #include <boost/algorithm/string/replace.hpp>
 
+mstch::citer mstch::first_not_ws(mstch::citer begin, mstch::citer end) {
+  for(auto it = begin; it != end; ++it)
+    if(*it != ' ') return it;
+  return end;
+}
+
+mstch::citer mstch::first_not_ws(mstch::criter begin, mstch::criter end) {
+  for(auto rit = begin; rit != end; ++rit)
+    if(*rit != ' ') return --(rit.base());
+  return --(end.base());
+}
+
 std::string mstch::html_escape(std::string str) {
   boost::replace_all(str, "&", "&amp;");
   boost::replace_all(str, "'", "&#39;");
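Note: a quick illustration of the two new helpers (added here for clarity, not part of the commit). The forward overload returns an iterator to the first non-space character, the reverse overload a forward iterator to the last one, so {first, last + 1} spans the trimmed text:

    #include <cassert>
    #include <string>
    #include "utils.hpp"

    int main() {
      const std::string s = "  abc  ";
      auto first = mstch::first_not_ws(s.begin(), s.end());    // points at 'a'
      auto last  = mstch::first_not_ws(s.rbegin(), s.rend());  // points at 'c'
      assert(std::string(first, last + 1) == "abc");
    }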

utils.hpp

@@ -4,6 +4,10 @@
 #include <string>
 
 namespace mstch {
   using citer = std::string::const_iterator;
+  using criter = std::string::const_reverse_iterator;
+  citer first_not_ws(citer begin, citer end);
+  citer first_not_ws(criter begin, criter end);
+
   std::string html_escape(std::string str);
 }