simplify code

parent 16783f76e3
commit 4d6744dc63
@@ -13,5 +13,6 @@ std::string mstch::render(
  std::map<std::string, template_type> partial_templates;
  for (auto& partial: partials)
    partial_templates.insert({partial.first, {partial.second}});

  return render_context(root, partial_templates).render(tmplt);
}
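For reference, the hunk above is the body of the public entry point: each partial string is wrapped in a template_type and handed to a render_context together with the root node. A minimal usage sketch, under the assumption that this revision already exposes the README-style mstch::map alias and a render(view, context, partials) overload:

#include <iostream>
#include <map>
#include <string>
#include <mstch/mstch.hpp>

int main() {
  // The partial "user" is pulled into the main view via {{> user}}.
  std::map<std::string, std::string> partials{{"user", "Hi {{name}}!\n"}};
  mstch::map context{{"name", std::string{"Chris"}}};

  // render() converts each partial string into a template_type internally,
  // as the loop in the hunk above shows.
  std::cout << mstch::render("{{> user}}", context, partials);
  return 0;
}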
@@ -15,18 +15,20 @@ std::string in_section::render(render_context& ctx, const token& token) {
    if (token.name() == section_name && skipped_openings == 0) {
      auto& node = ctx.get_node(section_name);
      std::string out;

      if (m_type == type::normal && !visit(is_node_empty(), node))
        out = visit(render_section(ctx, section), node);
      else if (m_type == type::inverted && visit(is_node_empty(), node))
        out = render_context::push(ctx).render(section);

      ctx.set_state<outside_section>();
      return out;
    } else {
    } else
      skipped_openings--;
    }
  else if (token.token_type() == token::type::inverted_section_open ||
      token.token_type() == token::type::section_open)
    skipped_openings++;

  section << token;
  return "";
}
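The state above closes a section when the matching {{/name}} token arrives: a normal {{#name}} section is rendered only if its node is non-empty, while an inverted {{^name}} section is rendered only if the node is empty. A small illustration of the two section kinds, again assuming the public mstch::map/mstch::array aliases:

#include <iostream>
#include <string>
#include <mstch/mstch.hpp>

int main() {
  std::string view{
      "{{#admins}}admin: {{name}}\n{{/admins}}"
      "{{^admins}}no admins\n{{/admins}}"};

  // Non-empty array: the normal section runs once per element,
  // the inverted section is skipped.
  mstch::map with{{"admins",
      mstch::array{mstch::map{{"name", std::string{"Ann"}}}}}};
  // Empty array: only the inverted section produces output.
  mstch::map without{{"admins", mstch::array{}}};

  std::cout << mstch::render(view, with);    // "admin: Ann\n"
  std::cout << mstch::render(view, without); // "no admins\n"
  return 0;
}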
@@ -18,32 +18,43 @@ void template_type::process_text(citer begin, citer end) {
  }
}

void template_type::tokenize(const std::string& tmplt) {
  std::string o{"{{"}, c{"}}"};
  citer beg = tmplt.begin();
  for (unsigned long pos = 0; pos < tmplt.size();) {
    auto to = tmplt.find(o, pos);
    auto tc = tmplt.find(c, (to == std::string::npos)?to:(to + 1));
    if (tc != std::string::npos && to != std::string::npos) {
      if (*(beg + to + o.size()) == '{' && *(beg + tc + c.size()) == '}')
        ++tc;
      process_text(beg + pos, beg + to);
      pos = tc + c.size();
      tokens.push_back({{beg + to, beg + tc + c.size()}, o.size(), c.size()});
      if (*(beg + to + o.size()) == '=' && *(beg + tc - 1) == '=') {
        o = {beg + to + o.size() + 1, beg + tmplt.find(' ', to)};
        c = {beg + tmplt.find(' ', to) + 1, beg + tc - 1};
void template_type::tokenize(const std::string& tmp) {
  std::string open{"{{"}, close{"}}"};
  citer beg = tmp.begin();
  auto npos = std::string::npos;

  for (unsigned long cur_pos = 0; cur_pos < tmp.size();) {
    auto open_pos = tmp.find(open, cur_pos);
    auto close_pos = tmp.find(
        close, (open_pos == npos) ? open_pos : (open_pos + 1));

    if (close_pos != npos && open_pos != npos) {
      if (*(beg + open_pos + open.size()) == '{' &&
          *(beg + close_pos + close.size()) == '}')
        ++close_pos;

      process_text(beg + cur_pos, beg + open_pos);
      cur_pos = close_pos + close.size();
      tokens.push_back({{beg + open_pos, beg + close_pos + close.size()},
          open.size(), close.size()});

      if (*(beg + open_pos + open.size()) == '=' &&
          *(beg + close_pos - 1) == '=')
      {
        open = {beg + open_pos + open.size() + 1, beg + tmp.find(' ',open_pos)};
        close = {beg + tmp.find(' ', open_pos) + 1, beg + close_pos - 1};
      }
    } else {
      process_text(beg + pos, tmplt.end());
      pos = tc;
      process_text(beg + cur_pos, tmp.end());
      cur_pos = close_pos;
    }
  }
}

void template_type::strip_whitespace() {
  auto lbegin = tokens.begin();
  auto line_begin = tokens.begin();
  bool has_tag = false, non_space = false;

  for (auto it = tokens.begin(); it != tokens.end(); ++it) {
    auto type = (*it).token_type();
    if (type != token::type::text && type != token::type::variable &&
@@ -51,12 +62,15 @@ void template_type::strip_whitespace() {
      has_tag = true;
    else if (!(*it).ws_only())
      non_space = true;

    if ((*it).eol()) {
      if (has_tag && !non_space)
        for (auto c = lbegin; it != c-1; c = (*c).ws_only()?tokens.erase(c):++c)
          it = (*c).eol()?c-1:it;
        for (auto cur = line_begin; it != cur - 1;
            cur = (*cur).ws_only() ? tokens.erase(cur) : ++cur)
          it = (*cur).eol() ? cur - 1 : it;

      non_space = has_tag = false;
      lbegin = it + 1;
      line_begin = it + 1;
    }
  }
}
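Besides plain {{tag}} tokens, the tokenizer above handles the triple mustache {{{tag}}} (the '{'/'}' check that widens the token) and the set-delimiter tag, whose content starts and ends with '=' and replaces the open/close strings for the rest of the input; strip_whitespace() then removes lines made up of nothing but a standalone tag. A hedged example of a template exercising those forms (standard Mustache syntax; rendering is assumed to go through the same public API as above):

#include <iostream>
#include <string>
#include <mstch/mstch.hpp>

int main() {
  // {{{snippet}}} is emitted without HTML escaping, {{=<% %>=}} switches the
  // delimiters, and <%name%> is a regular (escaped) variable afterwards.
  std::string view{
      "{{{snippet}}}\n"
      "{{=<% %>=}}\n"
      "<%name%>\n"};

  mstch::map context{
      {"snippet", std::string{"<b>bold</b>"}},
      {"name", std::string{"Chris & Co."}}};

  // Prints the raw snippet unescaped, then the name with '&' escaped,
  // rendered using the new <% %> delimiters.
  std::cout << mstch::render(view, context);
  return 0;
}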
@@ -20,7 +20,7 @@ class template_type {
  std::vector<token> tokens;
  void strip_whitespace();
  void process_text(citer beg, citer end);
  void tokenize(const std::string& tmplt);
  void tokenize(const std::string& tmp);
};

}
@@ -15,10 +15,12 @@ mstch::citer mstch::first_not_ws(mstch::criter begin, mstch::criter end) {
std::string mstch::html_escape(const std::string& str) {
  std::string out;
  citer start = str.begin();

  auto add_escape = [&out, &start](const std::string& escaped, citer& it) {
    out += std::string{start, it} + escaped;
    start = it + 1;
  };

  for (auto it = str.begin(); it != str.end(); ++it)
    switch (*it) {
      case '&': add_escape("&amp;", it); break;
@@ -29,5 +31,6 @@ std::string mstch::html_escape(const std::string& str) {
      case '/': add_escape("&#x2F;", it); break;
      default: break;
    }

  return out + std::string{start, str.end()};
}
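html_escape() walks the input once and splices an HTML entity into the output for each special character; the hunk shows the '&' and '/' cases, with the remaining characters handled in the elided lines. A sketch of the intended effect through the public API; the exact entities for the cases hidden in the elided lines are an assumption based on common HTML-escaping practice:

#include <iostream>
#include <string>
#include <mstch/mstch.hpp>

int main() {
  mstch::map context{{"html", std::string{"<a href=\"/\">&</a>"}}};

  // {{html}} goes through html_escape(), {{{html}}} bypasses it.
  std::cout << mstch::render("{{html}}", context) << "\n";
  // e.g. &lt;a href=&quot;&#x2F;&quot;&gt;&amp;&lt;&#x2F;a&gt;
  std::cout << mstch::render("{{{html}}}", context) << "\n";
  // <a href="/">&</a>
  return 0;
}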
@@ -35,6 +35,7 @@ inline std::string render_section::operator()(const lambda& fun) const {
  std::string section_str;
  for(auto& token: section)
    section_str += token.raw();

  return fun(section_str, [this](const std::string& str) {
    return render_context::push(ctx).render(template_type{str});
  });
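The operator() overload above implements Mustache lambdas: the lambda stored in the context receives the unrendered section text plus a callback that renders a string against the current context. A sketch of how such a context might look; the exact mstch::lambda typedef in this revision is an assumption (a callable taking the section text and a render callback, as the fun(section_str, ...) call above suggests):

#include <functional>
#include <iostream>
#include <string>
#include <mstch/mstch.hpp>

int main() {
  mstch::map context{
      {"name", std::string{"Willy"}},
      // Wraps the rendered section body in <b> tags.
      {"bold", mstch::lambda{
          [](const std::string& text,
             std::function<std::string(const std::string&)> render) {
            return "<b>" + render(text) + "</b>";
          }}}};

  // {{#bold}}...{{/bold}} hands the raw body "Hello {{name}}." to the lambda,
  // which renders it via the callback and decorates the result.
  std::cout << mstch::render("{{#bold}}Hello {{name}}.{{/bold}}", context);
  return 0;
}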