added site files
This commit is contained in:
parent a6f70a6c78
commit 329148c253
253 changed files with 30486 additions and 0 deletions
EnlighterJS/Source/Tokenizer/Standard.js (new file, 114 additions)
@@ -0,0 +1,114 @@
/*
---
description: Enlighter's Standard Tokenizer Engine

license: MIT-style

authors:
  - Andi Dittrich

requires:
  - Core/1.4.5

provides: [Tokenizer.Standard]
...
*/
EJS.Tokenizer.Standard = new Class({

    initialize: function(){
    },

    getTokens: function(language, code){
        // create token object factory
        var token = (function(text, alias, index){
            return {
                text: text,
                alias: alias,
                index: index,
                length: text.length,
                end: text.length + index
            };
        });

        // token list
        var rawTokens = this.getPreprocessedTokens(token);

        // apply each rule to the given sourcecode string
        Array.each(language.getRules(), function(rule){
            var match;

            // find ALL possible matches (also overlapping ones!)
            while ((match = rule.pattern.exec(code)) !== null){
                // override the usual regex behaviour of not matching results that overlap
                rule.pattern.lastIndex = match.index + 1;

                // no matching groups used ? -> use the full pattern match
                if (match.length == 1){
                    rawTokens.push(token(match[0], rule.alias, match.index));

                // otherwise use the matched groups
                }else{
                    // push each non-empty matched group
                    for (var i = 1; i < match.length; i++){
                        if (match[i] && match[i].length > 0){
                            rawTokens.push(token(match[i], rule.alias, match.index + match[0].indexOf(match[i])));
                        }
                    }
                }
            }
        });

        // sort tokens by index (first occurrence)
        rawTokens = rawTokens.sort(function(token1, token2){
            return token1.index - token2.index;
        });

        // cleaned token list to render
        var tokens = [];

        // last token position
        var lastTokenEnd = 0;

        // iterate over the raw token list, retain the first match and drop overlaps
        for (var i = 0; i < rawTokens.length; i++){
            // unmatched text between tokens ?
            if (lastTokenEnd < rawTokens[i].index){
                // create a new plain-text token for the gap
                tokens.push(token(code.substring(lastTokenEnd, rawTokens[i].index), '', lastTokenEnd));
            }

            // push current token to the list
            tokens.push(rawTokens[i]);

            // store last token position
            lastTokenEnd = rawTokens[i].end;

            // find the next, non-overlapping token
            var nextTokenFound = false;
            for (var j = i + 1; j < rawTokens.length; j++){
                if (rawTokens[j].index >= lastTokenEnd){
                    // the "current" token -> i will be incremented in the next loop => j-1
                    i = j - 1;
                    nextTokenFound = true;
                    break;
                }
            }

            // final position reached ?
            if (nextTokenFound === false){
                break;
            }
        }

        // text fragments complete ? or is the final one missing ?
        if (lastTokenEnd < code.length){
            tokens.push(token(code.substring(lastTokenEnd), '', lastTokenEnd));
        }

        return tokens;
    },

    // token pre-processing; can be overloaded by an extending class
    getPreprocessedTokens: function(token){
        return [];
    }
});
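For orientation, a minimal usage sketch of the Standard tokenizer (not part of this commit): it assumes a MooTools environment where EJS.Tokenizer.Standard is loaded, and uses a hypothetical language stub that only implements the getRules() contract the tokenizer relies on, i.e. a list of {pattern, alias} entries with global regex patterns.

// Hypothetical language stub - illustration only, not part of the commit.
var DemoLanguage = {
    getRules: function(){
        return [
            { pattern: /"[^"]*"/g,       alias: 'st0' }, // double-quoted strings
            { pattern: /\b(var|new)\b/g, alias: 'kw1' }  // a couple of keywords
        ];
    }
};

var tokenizer = new EJS.Tokenizer.Standard();
var tokens = tokenizer.getTokens(DemoLanguage, 'var s = "hello";');
// Yields a gap-free, ordered token list; unmatched text carries an empty alias:
//   kw1 'var', '' ' s = ', st0 '"hello"', '' ';'

The g flag on each rule pattern matters here: the tokenizer rewinds pattern.lastIndex after every hit to pick up overlapping matches, which only works on global regexes.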
EnlighterJS/Source/Tokenizer/Xml.js (new file, 68 additions)
@@ -0,0 +1,68 @@
/*
---
description: XML parser engine for EnlighterJS

license: MIT-style

authors:
  - Andi Dittrich
  - Jose Prado

requires:
  - Core/1.4.5

provides: [EnlighterJS.Tokenizer.Xml]
...
*/
EnlighterJS.Tokenizer.Xml = new Class({

    Extends: EnlighterJS.Tokenizer.Standard,

    code: null,

    /**
     * Store the code to pre-process the XML
     */
    getTokens: function(language, code){
        this.code = code;
        return this.parent(language, code);
    },

    /**
     * XML syntax is preprocessed: tags, attributes and values are tokenized up-front
     */
    getPreprocessedTokens: function(token){
        // token list
        var rawTokens = [];

        // tag + attribute matching and preprocessing
        var tagPattern = /((?:\&lt;|<)[A-Z:_][A-Z0-9:.-]*)([\s\S]*?)(\/?(?:\&gt;|>))/gi;
        var attPattern = /\b([\w:-]+)([ \t]*)(=)([ \t]*)(['"][^'"]+['"]|[^'" \t]+)/gi;

        // tmp storage
        var match = null;
        var attMatch = null;
        var index = 0;

        // create an array of matches containing opening tags, attributes, values and separators
        while ((match = tagPattern.exec(this.code)) != null){
            // opening tag
            rawTokens.push(token(match[1], 'kw1', match.index));

            while ((attMatch = attPattern.exec(match[2])) != null){
                // attribute name
                index = match.index + match[1].length + attMatch.index;
                rawTokens.push(token(attMatch[1], 'kw2', index));

                // separator (=)
                index += attMatch[1].length + attMatch[2].length;
                rawTokens.push(token(attMatch[3], 'kw1', index));

                // attribute value
                index += attMatch[3].length + attMatch[4].length;
                rawTokens.push(token(attMatch[5], 'st0', index));
            }

            // closing bracket of the tag
            rawTokens.push(token(match[3], 'kw1', match.index + match[1].length + match[2].length));
        }

        return rawTokens;
    }
});
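Again as an illustration only (assuming the same MooTools setup and a rule-less language object, so the preprocessed XML tokens do all the work), the XML tokenizer emits tag, attribute, separator and value tokens before the parent class fills the gaps:

// Hypothetical "empty" language - illustration only, not part of the commit.
var EmptyLanguage = { getRules: function(){ return []; } };

var xmlTokenizer = new EnlighterJS.Tokenizer.Xml();
var xmlTokens = xmlTokenizer.getTokens(EmptyLanguage, '<img src="logo.png" />');
// Expected aliases, in order (whitespace gaps carry an empty alias):
//   kw1 '<img', '' ' ', kw2 'src', kw1 '=', st0 '"logo.png"', '' ' ', kw1 '/>'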