Skip to content

Commit

Permalink
Merge branch 'semantic-tokens'
Browse files Browse the repository at this point in the history
  • Loading branch information
puremourning committed Apr 21, 2021
2 parents 88e7f40 + 2327c58 commit b10bc35
Show file tree
Hide file tree
Showing 5 changed files with 278 additions and 49 deletions.
4 changes: 4 additions & 0 deletions ycmd/completers/completer.py
Original file line number Diff line number Diff line change
Expand Up @@ -370,6 +370,10 @@ def ComputeSignaturesInner( self, request_data ):
return {}


def ComputeSemanticTokens( self, request_data ):
  """Return semantic highlighting tokens for the file in request_data.

  Default implementation: this completer offers no semantic tokens, so an
  empty dict is returned. Language-server completers override this to
  return { 'tokens': [ ... ] }."""
  return {}


def DefinedSubcommands( self ):
subcommands = sorted( self.GetSubcommandsMap().keys() )
try:
Expand Down
208 changes: 177 additions & 31 deletions ycmd/completers/language_server/language_server_completer.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,7 +137,11 @@ class ResponseAbortedException( Exception ):

class ResponseFailedException( Exception ):
  """Raised by LanguageServerConnection if a request returns an error.

  The LSP error object's code and message are exposed as error_code and
  error_message (0 / "No message" when absent), so callers can inspect the
  failure (e.g. to retry on ContentModified)."""
  # NOTE: the stale `pass # pragma: no cover` placeholder that preceded
  # __init__ was dead code and has been removed.
  def __init__( self, error ):
    self.error_code = error.get( 'code' ) or 0
    self.error_message = error.get( 'message' ) or "No message"
    super().__init__( f'Request failed: { self.error_code }: '
                      f'{ self.error_message }' )


class IncompatibleCompletionException( Exception ):
Expand Down Expand Up @@ -212,11 +216,7 @@ def AwaitResponse( self, timeout ):

if 'error' in self._message:
error = self._message[ 'error' ]
raise ResponseFailedException(
'Request failed: '
f'{ error.get( "code" ) or 0 }'
': '
f'{ error.get( "message" ) or "No message" }' )
raise ResponseFailedException( error )

return self._message

Expand Down Expand Up @@ -1279,7 +1279,7 @@ def ComputeCandidatesInner( self, request_data, codepoint ):
if not self._is_completion_provider:
return None, False

self._UpdateServerWithFileContents( request_data )
self._UpdateServerWithCurrentFileContents( request_data )

request_id = self.GetConnection().NextRequestId()

Expand Down Expand Up @@ -1503,18 +1503,18 @@ def SignatureHelpAvailable( self ):
else:
return responses.SignatureHelpAvailalability.NOT_AVAILABLE


def ComputeSignaturesInner( self, request_data ):
if not self.ServerIsReady():
return {}

if not self._server_capabilities.get( 'signatureHelpProvider' ):
return {}

self._UpdateServerWithFileContents( request_data )
self._UpdateServerWithCurrentFileContents( request_data )

request_id = self.GetConnection().NextRequestId()
msg = lsp.SignatureHelp( request_id, request_data )

response = self.GetConnection().GetResponse( request_id,
msg,
REQUEST_TIMEOUT_COMPLETION )
Expand Down Expand Up @@ -1543,6 +1543,63 @@ def ComputeSignaturesInner( self, request_data ):
return result


def ComputeSemanticTokens( self, request_data ):
  """Request semantic highlighting tokens for the file in request_data.

  Returns {} when the server is unavailable, not initialised, or does not
  advertise full-document semanticTokensProvider support; otherwise returns
  { 'tokens': [ ... ] } with the decoded token list."""
  if not self._initialize_event.wait( REQUEST_TIMEOUT_COMPLETION ):
    return {}

  if not self._ServerIsInitialized():
    return {}

  self._UpdateServerWithCurrentFileContents( request_data )

  provider = self._server_capabilities.get( 'semanticTokensProvider' )
  if provider is None:
    return {}

  atlas = TokenAtlas( provider[ 'legend' ] )

  # 'full' may be a boolean or an options object; a bare {} still means
  # full-document requests are supported.
  full_support = provider.get( 'full' )
  if full_support == {}:
    full_support = True

  if not full_support:
    return {}

  request_id = self.GetConnection().NextRequestId()

  # The server may reject the request with ContentModified while digesting
  # our file update, so retry a bounded number of times.
  MAX_RETRY = 3
  for attempt in range( MAX_RETRY ):
    try:
      response = self._connection.GetResponse(
        request_id,
        lsp.SemanticTokens( request_id, request_data ),
        3 * REQUEST_TIMEOUT_COMPLETION )
      break
    except ResponseFailedException as e:
      retriable = ( e.error_code == lsp.Errors.ContentModified and
                    attempt < MAX_RETRY - 1 )
      if not retriable:
        raise

  if response is None:
    return {}

  filename = request_data[ 'filepath' ]
  file_lines = GetFileLines( request_data, filename )
  result = response.get( 'result' ) or {}
  return {
    'tokens': _DecodeSemanticTokens( atlas,
                                     result.get( 'data' ) or [],
                                     filename,
                                     file_lines )
  }


def GetDetailedDiagnostic( self, request_data ):
self._UpdateServerWithFileContents( request_data )

Expand Down Expand Up @@ -1985,6 +2042,14 @@ def _AnySupportedFileType( self, file_types ):
return False


def _UpdateServerWithCurrentFileContents( self, request_data ):
  """Send the contents of just the file in request_data to the server.

  Takes self._server_info_mutex and delegates to
  _RefreshFileContentsUnderLock to emit the didOpen/didChange notification."""
  path = request_data[ 'filepath' ]
  file_types = request_data[ 'filetypes' ]
  text = GetFileContents( request_data, path )
  with self._server_info_mutex:
    self._RefreshFileContentsUnderLock( path, text, file_types )


def _UpdateServerWithFileContents( self, request_data ):
"""Update the server with the current contents of all open buffers, and
close any buffers no longer open.
Expand All @@ -1997,6 +2062,32 @@ def _UpdateServerWithFileContents( self, request_data ):
self._PurgeMissingFilesUnderLock( files_to_purge )


def _RefreshFileContentsUnderLock( self, file_name, contents, file_types ):
  """Notify the server of the latest contents of a single file.

  Must be called with self._server_info_mutex held (per the callers in this
  file). Consults the tracked ServerFileState: a file new to the server
  gets a didOpen notification, a dirty one gets didChange, and any other
  action sends nothing."""
  state = self._server_file_state[ file_name ]
  action = state.GetDirtyFileAction( contents )

  LOGGER.debug( 'Refreshing file %s: State is %s/action %s',
                file_name,
                state.state,
                action )

  notification = None
  if action == lsp.ServerFileState.OPEN_FILE:
    notification = lsp.DidOpenTextDocument( state, file_types, contents )
  elif action == lsp.ServerFileState.CHANGE_FILE:
    # FIXME: DidChangeTextDocument sends the full text rather than a diff,
    # because we have no mechanism for generating the deltas. Correct, but
    # it might lead to performance problems on large files.
    notification = lsp.DidChangeTextDocument( state, contents )

  if notification is not None:
    self.GetConnection().SendNotification( notification )


def _UpdateDirtyFilesUnderLock( self, request_data ):
for file_name, file_data in request_data[ 'file_data' ].items():
if not self._AnySupportedFileType( file_data[ 'filetypes' ] ):
Expand All @@ -2007,29 +2098,10 @@ def _UpdateDirtyFilesUnderLock( self, request_data ):
self.SupportedFiletypes() )
continue

file_state = self._server_file_state[ file_name ]
action = file_state.GetDirtyFileAction( file_data[ 'contents' ] )
self._RefreshFileContentsUnderLock( file_name,
file_data[ 'contents' ],
file_data[ 'filetypes' ] )

LOGGER.debug( 'Refreshing file %s: State is %s/action %s',
file_name,
file_state.state,
action )

if action == lsp.ServerFileState.OPEN_FILE:
msg = lsp.DidOpenTextDocument( file_state,
file_data[ 'filetypes' ],
file_data[ 'contents' ] )

self.GetConnection().SendNotification( msg )
elif action == lsp.ServerFileState.CHANGE_FILE:
# FIXME: DidChangeTextDocument doesn't actually do anything
# different from DidOpenTextDocument other than send the right
# message, because we don't actually have a mechanism for generating
# the diffs. This isn't strictly necessary, but might lead to
# performance problems.
msg = lsp.DidChangeTextDocument( file_state, file_data[ 'contents' ] )

self.GetConnection().SendNotification( msg )


def _UpdateSavedFilesUnderLock( self, request_data ):
Expand Down Expand Up @@ -3329,3 +3401,77 @@ def on_deleted( self, event ):
with self._server._server_info_mutex:
msg = lsp.DidChangeWatchedFiles( event.src_path, 'delete' )
self._server.GetConnection().SendNotification( msg )


class TokenAtlas:
  """Holds the semantic token legend advertised by the server.

  The legend maps the integer token-type and token-modifier indices used in
  semantic token responses to their string names."""

  def __init__( self, legend ):
    # Attribute names deliberately mirror the LSP legend keys.
    self.tokenTypes, self.tokenModifiers = ( legend[ 'tokenTypes' ],
                                             legend[ 'tokenModifiers' ] )


def _DecodeSemanticTokens( atlas, token_data, filename, contents ):
  """Decode the LSP delta-encoded semantic token stream into ycmd tokens.

  token_data is a flat list of integers, 5 per token:
  ( delta line, delta start char, length, type index, modifier bitmask ),
  where positions are relative to the previous token as per the LSP spec.
  Returns a list of dicts with 'range', 'type' and 'modifiers', the names
  resolved through the supplied TokenAtlas."""
  assert len( token_data ) % 5 == 0

  def ModifierNames( bitmask ):
    # The bitmask has bit i set when legend modifier i applies.
    names = []
    index = 0
    while ( 1 << index ) <= bitmask:
      if bitmask & ( 1 << index ):
        names.append( atlas.tokenModifiers[ index ] )
      index += 1
    return names

  tokens = []
  prev_line = 0
  prev_start = 0

  # Group the flat stream into consecutive 5-tuples.
  fields = [ iter( token_data ) ] * 5
  for ( delta_line, delta_start, length,
        type_index, modifier_bits ) in zip( *fields ):
    line = prev_line + delta_line
    # The start character is only relative when we stayed on the same line.
    start = delta_start + ( prev_start if line == prev_line else 0 )

    tokens.append( {
      'range': responses.BuildRangeData( _BuildRange(
        contents,
        filename,
        {
          'start': {
            'line': line,
            'character': start,
          },
          'end': {
            'line': line,
            'character': start + length,
          }
        }
      ) ),
      'type': atlas.tokenTypes[ type_index ],
      'modifiers': ModifierNames( modifier_bits )
    } )

    prev_line = line
    prev_start = start

  return tokens
Loading

0 comments on commit b10bc35

Please sign in to comment.